[Binary artifact: tar archive of Zuul CI output. Recoverable file listing: var/home/core/zuul-output/, var/home/core/zuul-output/logs/, var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log). The compressed contents are not recoverable as text.]
L-[>2Br{ωH7뙑T :p-F; &#xNgԩm :['bB^ ^ϮA7ߟBR?82{ス,S&`8xudM~)H?c7 #0~2'LI-a)4B DۅR) o5@uh`RqL7~96.*)Mq"~LY T'BOuA}VU}>>iKmRO\Nj9Nj9w1gq|B'ه2~҆T"x)~Fn"B]&;6s'N"(ۋx@ Żt!F,p\G"`"ГBhA #(l2&"q;-OS:Il֏~nN6yMvJFzi>t{_~r_?'d6BՁ2`2D>J`(!#GhP $Gp'!r%Gp4 0tsRXxk\(C"k4, ΐY^;:-j 9[s/- D -_]QXPjS)i)5cls*k␈RIÄ (O{u4<xz鏦g9Z/0ab9T/"(ʤvC!(^9}hy<)\7ٻ޶,W?M6㺜bgAcqШmDܒ=HɖSR嘊!YԹM˼?t1p0r֫MIP3 D=BL@eʖ!Hv2)m0J*}jѣġ]B.RT۲\W_J|^t`UfVa?>w]_3  S|y<]',O. pdY'N WMeo,RM)OSNK^˩ZG玿/ASzl鉮J̧fnM?GotٞTi${ N4N,;}Yۏs_ҮC >?iu::+-b~%\w+>wn o^".<t 5ԝjl@R  d ڼ(@%w f9[ oB !Sܦ=cK}Z7,7W[-pH{{շ|xw쪿mbcs=xbT_UZ߃Y9fV6fꎢϮ\t=.WqCtu3B}y@{ժݣ`tNOyW#]vdU؜Swg={ذ5LqK-f]Wnxz3У\(E3$DFy*yI:*;P)ѯTgkb9PT'Uv2.!H@9IYc8^xFO%6E4A Y3js,IO XGCR:P wkX-n5WcaRD7LӀr^F d0j~a=a5a=vzmHey`[=+sdYQI #J$,NDWX*[UmBRE#=q-iTeLqOzC2a|c <ݦ%gڶ>تiUI}s׃zMbFB|"/֬|֎gQs9>5VyڍY|*Y&q W,d{mdlݗ~ҳ>OwBXCIy@/{K]_tICZT!:eLG$=Ft:g hJ"ZCi/_GSJ.ޭ|&O~}tɧ4\yeuDkBLߛ!G}c~641,?VKwY6h̃* b_BQŠgtoGrw-]䰛*=dޱWCfϖB~)n(| 6޵L\];NߝtJ)NJLיE ay 9J"=M\#?>MZd+획fY :*a5qvakԯU 0͏CTFD3"∈[mʤ̒4IȲ%hE|{cE AI.CD6N $Ǭ рg|BII6I!3BǭQK݈<ѫu%!uV%⢭vMba<:u$.!;efG>F IqYqašaq(Vn~h=g庒ys7(G\> hc_&X d9`^pWϣKf&jA )6dpwfB~,/16`s//!iBdLr69Yg%^`b:*LRsд܍5L)\{(CPU=x4ڊE'3:K8-ORW\eoCsK$MO-6*VۇRM(x1_cG_.q0Ѳ|!RZm* w6G| igQ F fH)} Rot/R񾎡 q)礵>Zdd;rdzɀ߂G(#X8LLOԩ3/udu:] BYvdZ/Ĵ=MMՀ=7]U-4dѪAXeP2xnv (Z_6/A ^.`Ys\@CTiFyds#l`PzYݲc3[,y嘒L3 Z"h޲%{eIf^M8QۧHrN#zRߞpKՠ{bORyݫkFY2mtAr'PpQYl ' Zx iZ}͘A*+1ܥą`Q!I@z[ɤQH}4sDk~UgO՗dԔh (T" qe=KV d*U4&]|cBzx=cb:$攱>p]-yI_Kw*#SdVX(Rt$7Σک@Ò`j'FA`\R^ƨR=+2/HTRBrFaȠG-R(Ioe~߸j+}ln _h;0 8^W  4k,h@ؠQkS 85` l7,tuV:zYwzY̳ѭܪ=KH^RYL=M*m8~LȌ2A@ke;/]i;N-HB6rnrr'T|gKQdњ\:J5I"L%Y$xtE[I*VqA"ŲRd pY! (%{(j{! hJV齥7,if^^M4,y]=E-ji'"8R>(ut* u,ݼ`ͫHp y 4 l;*jy,pUbpUDQi R \q \i :\)•b~DpUî_u9x\iHJW4Ӗ#"9"c+rD0 5JsróM陝Zh ^,W0E},Wݩ^wxC2V`2Uo?ᾑc2GE\}4HkaHiGki˹l.ڣ"s;W$%q5rGe \q#Z57vܳ{p;kmx*gr2-`+I?B҇%,o_;8kwl1O?"u)8fd:i.L9sќwz0ϕ1v7 L_~˟+MK(_ZOuB B3wۦs@J5ӑ?+GG094*X%@;i'm.$%7)xUKČKișy|ᣏP/KN&wmT+ryJxwr=\p#Pyg3v;w;c睱yg3v;){[ȸc睱hyg3ք;w;#ڗiLv?UM0h:g޲yjllQdlۀATߕF($Y+ݺǡm+6duA鱎ce#*J@ jtL+Ed !鬅u7LJ.%T”.} {$jf1\f8{ ]y㯚8{no˹ק^z^<]<2~8PQ]i^iM6J0lŵ$EoPuM-BhgB%]FdJ3!h1d#GlBTzolO2~q,:{Ӓ]//~q+KAF(X/.EQ\,*ǀ~"kԂ!T~Paoڱ?t= 6= 0ٗ0tT{T R]ֲ8We5t_lL'}panZՠ >@# 4^.=jOw?9e)*ċ\z)E((KUxldA% RgQ{`GX.Q)ƹR,)2HgB0PO?}K+T7-CU:oi73̋4ܻW?wt`(7̴ ]#^{ϞF>^ѝ6zA*W+m0{R߲4ǀ \ <72 <%TSRU>h+u} Nd 56~tL Mfd% Ђ. M,sA s Gc 7q ӺwHgBgUK|Uꎳ?7?@dXV$Y[NZ]s(IDUDZq-f.Rk'}.ky0!161B㜢Mm.h|e `H|:JrȖ`t1pm7yJfp( B H#kj'9'aUn5JjUYUy5? 
%M`ɚ y| sO~),s=Kt / QMesll)쒷_TW//^f EP¡/AN2OV\I.AUjҁe,^|^& 얮Jȍ&Cn<5]^?7,b,npkfzs8dz-^O[6rE{n7=[=_5L=ܬdC]}t^uF-''㛛 OBu7Tb<6 d ~bs)4iؾ-(.)@90N 5M%4V2-Ma?Li8SUF{@qCZ( 2D LEٓEFN+͐TrKo:>=IXfr|ZgU~IA7R.LxY -RA H!J$kv&h;{մ}4` X0M1VR*\#VT-޾wz=dQ|y;0mQbsM) Z#Vk}Lt*UB5*<HzPvh\iF`1BňSAoDa[J T(glf@ _Cڥ %~[:D+wzVU Ia6A> w  wM0-#~>Of}u7F1hi6Od^rF.~փx2exsǫ&<  λa:jy>Jyy7dkc&eTWَ7r4Y:V`Cu_NNjˣxzq?x?~yآO`c%E n ?o~zкy=]/=5[pW N4RF?}qt \jWB%ioqWXl6E(.S*ZKuﯢ֯x7jW*/C'ή=ҿ_ȻEI~,Zo}<ɳ`\!fc;w9lgBrpiTC?'V-r_G5RK"MW˵ό.T%+õ /Gyvx;֑,0˩{_UԽz*Cd `vb;qe}~z۴9;'`$a[hzx۞`A8}@SA,ʜc1.%s"a8CƌٻXR:sB+\ C$):EPX)(l-IsDۛ8h~h@2!ͱvyvo]W5+3qĘ6@\0Wl`+3{ S6xΚMj]zZ|,P}c) AfddW/,4k !l4:*v*˭-j>krT':*)M" Jsph:Tk}dN{u[w:ky:n1o2Vzsԋ]> {BR2O5;rDY45F5iT Of@t>d¾g =gϕ5J Q'C-R,Pѕ%62z(Eѵ"*UѯA*6Mt4x&z^M{0/tH>NVZvŇzh Nw6})6 L^%!|& KJd EDE-zO./xp c؃cș$11YN!H3DnA8+d!zy(KyqHaw{Q7Ct}fH;vn* r)8bR^ ZѱMArh ɀHXQ,* BL4N3xyP{c_|&-3u޵q$20.#a97 `B?%(RKR~D5"i[W]U@?(^9y%NQ0eQߟ__|=c =w}fq4ƽI6\0xt]Jwz}y~ɡqO,YO.Oyj…?YtjgV|5duHިwɫiB5h`ڬ+vzҤ26&ӆx| NIwm&mh,p `|.naU*PTW몢tɭٯPCCUնU]$5uSԬ:f$RNe6vꭞ]/oTNiˡQֻ%wSLRߛfл }53tg*n=YKx=2vlUp$Uɒ!kQJ)gALxb1.٤JDL־myZŃy:ƎA#H5rӥ4O7/<\ݱf*DA[M&r(a Q8#cv6vG~}7qZ:jH(Î L9jJcRb͐ ™ GlHwtRfkE%tX92,- e{e% ~l4E=fr9&9V/\NfJ2%M>*5Ǔ..{<{v@-9=D%~jJp"z3 ifA36@?gAh3.SB]n"kЙw+:T6*)Ы(\ҍPl|+|ȪSnTbzn~zQš]fYa$YncifY H(򹖜N)bxЄ$+M~'yIfZ(<ε1OEM0&_qEsX,Don$*eO-M5Sᜬ-uGs !QСDzJPBQ4DXJvmFĖ E`uj@p|Zpp$jS|!|4EvV"4W7.9Sפ'ߤ/O*vzwճbO=?׳A(5. /`xlܾ{T3Ʒ=^5_<(u09e^(;30va|T<[o` 70RXN=W9CLDfdI71>3Y|MZm!>kb@}D^6r9ݫh$۳SLN]ә4, %5dy8+[m!h[s㩚[E+A"G,2; "J:ai$b@u#?P,9fy (W mB O$iaJ;f2Rʃp-1Kȳ#FNÌh3mz@W[a5L\K |yJSUmxVls(MV5էGW=5jeFsά&!0h_d8 `)A&#R~b*9X^/r׿O`lC GSב )dNcR@2J`hwZI  Ay?cPZf~ZX.5G9a&Hpсq NS;RNJ`A8.:O_폵45# 0(Qt>`+pY( qBm  b4.h;AjW>Rq-D$MHS'pKAO"D;w>E@qF7 rLl*N$Rlvή̗ {rI,l0rdp E~bմD Uk[b|}KWu͐fmy3$a0,8B?oC1zydS(mͭN;Yjm_9/cSpV !Q6f6Y tOp6,Z9}RɆJoeeA8s0g޼]w0Qgos BwRC@Iu'8ځxm5 M5hZ6{] xv%j5$z2T_nK5oAa)RYa` y/-hg~JV&;T1,J_J#\jlnzEQ[ChN- ϦY00^wŕlW۷](ڞ9wuCgKdURC?RWoRr{tru}U<%)AkT2.6BsEL jcW}(W^<2 Ww.S-΁gKH澙ÇlcighhZ/f0jj8D p qݕ/XX \S:n[NEeXK6U j8|.u$ "k1 sLjX0bX;7)Ky^H_QqQ=,P<9$cr ]If:ph΂Z9 :ӭĖѧϵۈ>y.'BϹ:|)Xs~xZ Z3z+[土Ob 4<0smC K紽a{x8&O.'!&[Ps|t &\'& SOLa'N0jZ.9]0^On?݂54XsrH0W1<d:kZôԀE c[Bl^`yn0" #5޷{b"C0!(WVa S띱Vc&ye4zl{hnDl FNC_R7 v`2-|jf,o7[Ʉ,x:k5'KT8[7OUש32{O*ܼFvHgHx.C_dm:z'@s1tyYzs0=g7fQ!̡d^qkj7 JW^+ j^(FO7{3܄m7s#{*3+ 3֜vZhiquv^1/m'H7o'V!(.ZnVm%rmmҿl7י\^~7o QZX:r۽"x=nOvxF pQR1qV{tHbaJZk$?4,ܒG4rOlk~ 2{}mRh(|ʥE3 >'Y6 rݻ{ԗsr0dAr E NZ="`RbUBXY i)#"b1h#2&"!Tk4<Ym(i4j*3~LmFחt/8X}oK!9H%VnGX@e҉}L%#`9BRL I4^4B劍;)'P !p"]$ y=Za)4PMo=ѿſ|vnƅH~A1>QX} bQmLkW M̼RA-M&ٶIki`Oےmv5f"KV+󇫩UlVBZy˗Xj+ )!Ø >dQ]'nWG,^iQn0n~? iW1jȍfO4k^n+mn+D`woy֭ԑݤRpmp{E쓽sye:]Cp>6d:LT;L"-ElN+5˴`d=+oSwտ|w<8?H.Ս,*w:C([{fʻj)no}0Ǻ{ϝ "D gDCp>f,Qs8wakԯ_`D6?ED]"޺-Iಗ: YZtg @ \T{Ke% S6N۬4Qg|@RII&I!B^;|8w#ǣD..֙%Yml2.\sFσ6ǣXąlNx摨DR_F.= /xXmvl2 tI] v$q. z n~|Gm39vq(l~plLG0H`!E 粁(\㝆`TAǥkܻrK˽!2&it42rUF!&VE jΝKn(EitJ [vW;ķ{B;qˣUM!''?`5kh,Z%8m^TYt2[g(ҎCSaIjp7Y,Z]"qLq6Db0ltj!0(Hr`Psȳ=fq~NUpAyMYv`Y>eig*c0wZ0U S4(_x|ELIPny -ޞfz׏w"{ps_?U >ߟv7iφ1ݤS`dI-{Sճv!$Ι[c7/+>he>.Zlk7i˶\˗{g/tT;--6|\ߌVoj㣮oօZ޺Z_u>t>]t u0zO iDL7{ft~&*ъqNjգ`,ʙl|,+q4o`2ţ/2!Xcd;/]ɔYɃkL+UbmNGV\Xs9"X*a&E >0LJ6щa,>(b;j -2Y[KʐcB޷uɵL9s}x4[6ցR}͟<}5D(̵R{$KMK:RY5xy]ie`!J/J"{K*!lwȸٷ2G e V.&"XVl\@蠳S:Zˡvg$p=E-S91%c@3 E4Ekk,VY~,nwEqFz <=h#ĮWMoT޳dNX=Dsg2h"YF;G0B{n/|hj]J\%hQZ{cxg)ĤbhIÕ-Gu 2hh HT"`Im G6,!6lb%#gQ( d.M!I_A>a3 S!1Mv'}mgy20:;Hё|\;oN/)֫vb4ӰW:&3ױљ1c'| 91CSJ{Z5iV22AH)C{r]{ցǶ'g|$weJG(=RXD 4$p FMMI4 YlFh1a{_Gn.ilO.nى L u&3qe %l.D8yb%zҁR=(WO(l쓉%q5O%VjcaIJ! 
ca% pU^ BFilv2z/Fbշx߽e%W6s2qh MR:._AxT  qZV3X[vgJWO{}F?tf Ɨ)y:%V#?ft0&/<!stm \3@i•ɹ);R(AI^&G@o 'DqBɸP2wմU2C('T% 3#C"Z&]~?%squz K- Oi\w_t6tnA9r-IzzLxsO}݈nvy7ort־ټuM'- N^ z+`g.0o1w4M6hGv_Ѭs':׵F[U4'kl<.(G?޾ͷW~}_?{ _߼k?XPv{D]⌝De' ͏Z[w- lеš^= լ{>Zskq8[ZO~3;>)Q)G8BH}eA#r8믐ؤ݁bVM(gZ"̧n1n/b fni+%E9vK-zx̌dbUQPץZuiŔzE_z2-Z@wje pko 0WEU)k f}k{6V:)l\ ^-[!/?,0nU"/gՏ ,0\hӅU`?KMjAu/T?Uŗ~ޅɇ~TLW i$ГZ‡&aՀy4j8F=wSzu88֗"@Sś\?=XL:)6"K'W ^-쒍v8f Vq q:NBuAQɹcDxc F`tYa,TV1Omul'|+/GHZo͞gA (>09GJقU uL+!L42YPiݍ_(!'ĚNk?^Pes<%'B_F:Yd밪 9Mт!W׵+8X,Q` e +gI1邞_,,,L\pXq@ gJD%+X.1 Q--Fis eIK:1;qғP3 zb>oQ|0U#&{hOusщphmR.8B䌂~-#"᧠QN !F7DҞJ*h4 o5 0KSRy##,Yyd'("k/%%\2NsDB,,xGI4 ;#,;BEÏxPwm6慶ʹ:!|Ʒ4Sr4O=kEB^) 1D*%RAYqjC.(G>śe 1h)~ɖӿNYCkV3oNdp)yEÚWs)\rrxrHaDt JYTV֔L!WHB1V`09w=#gsO(fʹ])Q)4ըD!;!Xhز8(G,q* 8{v+.4eܦӸ u"m6gnO|kh+XWSY'""zwx|YWLMgL {T0P!ho 7 /i/^j 9BfX)giRs h Gj{b"C0!":o;cƌL"^ˈiD hnDdi R/Xg2'!i/lhʫg۫]Pu4Z ^Mp[$f&u4-]=xV҄QkYk&76MwI$ tzu1a?h]@Sj< p͖X}y4KвE-n'-(,hٌwn#al$tV_B؛p[Z==L\飷ސϛMm8-gmig9s>C2W-6?m4Bra ^.USVjH{U`| l4[ RVJ)V ژW+3Xǧ&z_<~Rj./ 5* `ʈj H#.\40i/S+oKmzO8,G+G|:~R7>[fn͸d>Iq{_Ͽmd[Y|6+uT;.ĿӜ!y[ܽj;AtA<{/,WWN顷XduzR9_Mˆ>հVBXYL^jʈhA #(H8Џ&"mMv$: 'g|O.׬Mk±Y3:Yg)oP,\W3U*|9Ce=N͙|[rsжVS=oc;Xh5 *GE@P(nq]\q10 .f`‚AXD‰4dePPF0Q*X.Y_5*& r }A!gh՞k:Fo`(j$~gTrZ$z K:W?6RM¼ ڥmRaK}|4< ) kZ=@,=7bּ'DKT{sy< F]ȅHJTjöbމ%Jcy9qp}ãhZ1I &, ,Fu3{d.^UYR:ቓHdTG.h+$ve(:.VN׳r5@`g'fgrS:H}_|} vSv 8mFm>7B9JP<@I'1d Sq J1$Ik1CNBDH@9FRbƼ o E`(Pd&l7B88 ~zi5|9%M) yw"!jt5ʚ9JY;u ȊR#H)NÚ"^39Fp>~QM|4V$¸/Mr@jqkB)ȭ|u R^(4 DI%)_Jr l]'Ji5ŤMF)MlLfcCx1n^^ W BY;I}]UQoG}>5oEՐ8(6 @W 'עׂV\?3& %i7ʻÈ<ݗaHQ˰lðl+aSSzGMG V{kIRaBl⽑}NE-%%ɶgrzNVK8\LUo닻a6=0O7 ,P{W.1}jt.Ic w R]Wx^ aZUnQzA vi%&u-$vmb"sCEATV% jB*-pEF,R&<6qF>tyFg* Lހ9|5?f$9l]p^`dQcP28k%V4^8bCJ amG% t񪘆/G6lydQkjꓔqYc' n{̈ ^W[6>[C_L/Je ?M) u/`y@Ȃq k/ /^8gWz;T q5’IV{"F#ɹ `ej;#:mB\ +7‭iU"bK(gqok ÌМ!n6vXzL{ݎPm}֝7>V5Ҧ\*uKuU|b:z"ʵ`uy}\ ann|n 0+>yM\Km:R"=*H sC_T-uAZ|^*/1Ua^iGjd~rVk/;"\@Uʛ*o}ybF`09+jg6ii 3XGJO'% F2ڲ OM) ?Sj] ta(Re +}p1eM‹+ ~|ۗqKaÁqI\JuQr&\r{; K}'A KO9aA'Ϟ[uuLܶWW?wqx}a(uCWPnlyNY||O{Urn2V=t{U ]uii/AG\{i=& VǓG˓]OuzWVðJ8̠gZO9.h>*M. b`Bk(WX EQFy볻ϱ^5&`XxlT4P5ceZ0-YQIkLGJECaj3jZFNLMFSƪiYv>ZtRmҖ 9[*LxQT8ڐ~JHQbV IM XQ9N?`.]$S{+%R.BܶbVt3y(hp.v䵇 gߤJ꼞ۜ*29+6ܚ)TR*Csn~Ϊg\ ؠ[EDbQNEQ}8qqʁ JjFdZJH1\>jK ^Ȍ ^`A ƜJ*fv͘dӅ$c_]3Bƒ/9gw 7kW˴yi3f0Ǐ9MØ K ^yƒ"AhQE2K89B#s!sUcS= JڤB:J`Kґ0,.Dsfێ~<K:"&)Q(k$5/#Y_H}CV,`am7D*$bU9(kđ8ꯪ4c9Ⱦ(jss*P/$ IZȈ @+QׄHb8"Md ZևYVFhV[]5nY#N#vqD&&dbЂKxvh1oG. T(Z N^UwFY1"J ڤ4A qy# H4ޣ%ASmY#~:h|qɮzѴM;ގ zl2@JF Y9fQ£IRDC/>l;vՇe}hwӇ6dw3U{Fo80*F{ .X-b9{./*,5uù4DUi f (ne{Q|:l'E-@sߺcƔt֒BT0Jg9cKĀ7aʁYё͠K-fWJ_Jø"?g*-7+F:g(K%7,]C^==uq9ZwPB޻lZ豭pVV}_Fk\:zou> fvKĸ -/Q\ k1l0zY`x|lg6c R-J#" |K\K(?1G2ҒuJqPHUA:ARS NX/ЪÕ r ~285.DHp=>*ʫ?ׇ;2Kce" RQ.@SFі$ODCRYV%L8* 1Jlrṳ^q@J\9%x%x8SChٓroIj>zh+#:rЦ.خw_?g@!Yz JȔɽA-&drT2ej:LJeL0!R9>.sR-{1~p3$YpjTQルo Z#fcZpgs]1wLiRc{L|Bj+4:]a2thO?;Y޹uwŝkR[Io [a P\%u*J ؆:hy4c<'EUY<};:)jb!p;Nk QW}VIVIj{awxMZ@` 5Dy}R.Xe|bO/O_`\Pߡ%=BPuąfr} '>!`M 0Ar킥uDЁ RvOI]eӱ\SQWZ]]e*U9+٢1x_,VQ;[$ G-aE\s$p̦X ѡ,м*%oin]1=^1XDO)x&'3V>݌Zf]7g*t,0ȣAb8zQ,VFRQ& htQ7Ƀqw2Y@$K}}yjCO{Y RN(_vF_>bwu݊|b9or{,zTiN0:u0+H)8TP2*& !o7[WlcM[o?}qe7ybc>+h dV;p߯/D/6EIh@5I*׵[ݛ c'V A.XgLρ0AQoKKb$޹ȁHm}$UosZ5S"Wl?^>X)PB/" k"+'Z@F. 
g ޱyw`wxg/GjsIb?TɗGN"oOeZc /nY:Z[IGz"\0IG& ڠ ,[KY~-EN'mmT%P/KψZcO9r|Kr럮eJ3(q|LoЁ}@~(AX pCLr5W<8e6VgIL8.&jQ-W>#RD#8Yj<JΔDqT;*E oFf:q \]9 Vcc=S|+jz̈}*.1*cg9Cv@41m1x9NB :Gx'[ݱ?:>^l8-CY5K(vɻv#7J2~iW X.pʏ`h4RKX,-%)*rΊy/XW_DKjE nSDpHqpGdNi- Bu^Vܨ% ^QCzT| J?tȟk"[P6 #ALm~w"X3F.nGJgt7*9tFThG\H`ZȪn|(?l ְh GR@JH4)2F(Y]1>Dk8r@3NPd,8<$@R*$!`$ ABk&!{bcLh2cpr<8gDB:Q^+ҎVb;?~n{myesWFUk8XzE-}u{/q,3/AVmf}nhz oj\8g[ ݶ3:B2?Iov5vYyhCw;W.znlV %YNufMﭖ/on|bl›!slnotyD]*##qz?7F5ß/yHw5;!qڡ`uq%R"j?wDZ[]{ޥ+O#:bL&j&B|ɗ'3WyNL"&N rT HM ʣ~D7Y%rrhW3~;ƽxsS\OV]Հ烎E7('틁y>wn>L}iMFnW޶5;QL?F7e,4ϖr'Lt)X itTr7$ qrS{9a:4]Nէ."\13GJAZ`Q/U\Ф.Oy{ygڴKO0\c 7,kt$G(0"x1D-PcZO;AjWͮ[CFjd*onU:R GBE=I Hfg,%eRJj{U}ß^.n|o4GR`Z\^?]ޟZ]Y4>rmCJq? BzbD5$.a0 Un^qbP`X9R//}.{u㬱WF:dScY g_U]aUmbe;GԷo5$[ӨBxS7׏ ߻:׿:{y//~:93_O^Jw8s:M?"@=NCתi܈-VQO&|~5is \UB϶J8Wv~7˸.rzʣٳ({yCCPWlPoTI+T՗t*&Rgs+++^MhO]7,L9 g5u/?{WFO;w _o;-v{s]܇,EHv2+vK~M[Lٲ|pzY +^=Zǣ,ƅbvs>.~R̩8Yujv@Dޭ۷oN?z+W֑_VW}HD azqUQCcueC?QYŻ!-Swnc1nwafeo}[p~jc^^7Wwl.l ^^_,d"o,k~Diθj xlVEVLcs@Q%YŜ]J]A@!]k!`b{b=wvJ@鷻 J ISG@Qql:51q2餬c(kkAUւw4ĵ u[YQw]zȊK_lwVtIh-gn Si*yk;V%#heMxi&V͂ax[JgA@#eUyv{bs6[{ M}Ni8J"h}h886vD8W` A}!VA`J a:PxPB ƃ$V\]wzb!VE.S"7ʧtΑebF)GLD.@T2Ną\J]ŒLjG\#<"vJ̕)b5s.Pڍ^,(RGT{$ͤŠ3&Vޮ1)?&RCeCUus)&1>_-EƬ|֓y},7Ϊ}6rz%d *. %&f2roю*GAّ_9d$ WFkt;dpb"Ι+2ZpK:g `[fȥҪEp(M.%Lc!MJfh(&t]?ӘSz2iz=goc~x7}?FV(ټ@!M1U +ѽ~~.#fݵU%]WU6LI-FFYnA?>`weC{Ew(j8>>>c)Cӣ5CiفN(|4-m{VoHsMNzbl\QHk*뢒E6=XFzM%.Mw͹i?(Z춖q[rIt}tTY˧`];uR1T"` c.<B1Vb(d"ΦZ7D " 0<0,+Όu A:KD-0 y€^T&libheϼOKbx(5723_ؼC׸Ee #^ ZRstS":0Y!42ɝ@ƃh(6nuz!q+k;jdT-TPUrTCvv""nDn󄳢ּ\k'Fyˌ6Iߌ9-/M_%;ߡws,ϗc\?Ri=\ӝ͎oַi]=`Dl*{Epctwtl; s.ܳI8;l-'9VWmkvλj/nb}%;nW7웸wߩ;r%/FԘh7|E;V?,LggXr AQT:ͳMrAH 6^Ƚa S[B3vF'^odeI)CWl:cģ@zY;ŧ4t\Ӈ>H{"u$2.N@{m ۃmcZOĴbVԟ?7(}ܠ>_kcNdy)U*,7ZF!YcY_Խ;{g~'[  '.ȏuIr!2i\^RD$0ğIɧU"PR eCpL,Fg2/T؟6ArN>YU=g#[5D*Ғ&KSRYH^gÍЙ'Ӂk0l%x-&/c0Dt(f쨩DKӢDEY":9?LjgN'1(Y\}}wWIQHFjlFz\Vbaޫa/O 3IUN fK\|t~:?ȏ؄4J慉6 HYńZ9 A(n$0ERU[8( AHQ`SjD(6EF1(,Θ22bW3g3b,^ 1k]Q[WFmP1;g31$5y#>(nBJm:rTyoXd@21CEL&&Z$aQq$1Ȑd|jl>_WJ`<D?vEDSMCĆw.KDgS&Gf"IB -A(:2 > %E]X46@qR p \p&ĀNHICJo89QWk CKvE[mņwM%s1pey˒Ea,N$]6iˊ zǮx* a~8,sIF1ԗlNkُOFk - )4` AS0h M)4`"˝#K]6j M)4` AS0hLkL3l }S0h M)4` AS0h -)4` AS0h M)4n M)4M)4`p)4` AS0h "т~X>Bsy?P%i X@T]Zo|U`){=_xQӡJlos?,Ez1a^Ʌ݋bY3]S2$YV2 {?5s.A__$@:eЁBCC=$ҹ/y뉇3/5 tFVhwN~דՙz]`g'gNoon2-|FL7k/rX~zBqr>?[\z"\^LVT^sWVW<|:MnЧS2 d9X$* R ;,a4s{h9w=zNN'?;){3ieedsxČ&&Wc) AEocT Ӂձ HT&PôEȠ-e~/׾j,n^c[eɠz4)dV) 4ט,@9CkW I9 /Y쓆W!iXMk$֍~4nd,E<}fX#CiTOD3BR]eq|rZ:۲ĥK ә:m#lro *ս^YnuICRSJ"l=ARHQ6TE3L鞞MpaIn"'Jќdsk,QVit  7ޤ'>͚'|B&x΃JDbPHI;5 ~ 7{֌r^X2j EέRȰ,^ -J)Oi{(6%jռ#7p[x>y3\t'@_}90J04Cxl3AJ[>3p/ 3 jP9  AOArW Hɝ oF Dybxb*+J3c(0,NIMdzXHaR0E4^JJdA,s$k5&j: l|4n7Ynzw+$Ղ _ g3e(Q*5Hh#JCɑ92ޑaZ;y2y+%ݡ;ަή.bUjfP쌓wmgoB7n ) :);v؀#b\ksGn1V`VWր>#qЈ L1l+LZjF,B")k=/7h|FC!9yPꮑPeҴ \dZ/L {'6\H7Jpoʬ- %m@ `!G(L=%L4" nA# |)ofͪK8>{qLhE W슪UxTA]S~cZ^uR @ ń\tO;,V:vyoBJbׯvY>yRxu5O\>ޒFw:c~<#3ӳ|Eބ^w~Sa;!Tsoxh^Tt5/Cϖ٥}Uk? da3BJO;5AtC.2k"D(&)#r)4H#nX|y(5 ]"G0&ƊQjd#0J$*Ƶ?SnݨNɔQXV,DDꥦ0+Mc" [޷7O{dseCXC7G - ֙[>R3ܟ]ka#% tS.HY1`}1]HuUd@d+Awz7{:ujt\̞EQEfƂИ @>2"L/##/VE$)QWYf>,0R"%1dNF03,0axPAPfdbU=v>DpҲ. g]Lkr×d2z".64T:M}"u%X!B|7ՀPB(:8W(\A\G/$`5!ӄn'Ri3-% ػIAn)b5I6p~qSP|迾^ ~2ٝ=Pq )?86;x9&46>n`<0Q*tB5-$j[b|qKu͐fJgf4! 3#%i^Q`f3\rԶJVG/YjrNиAr;=O syT5ؘY/fvp+vPdIoeeny;sOޜ|M?g}xfߠ%:?Y_$GOA8MFMShZ&ߺ5ڕWiDOt_a,tx*&Ϸ_wAaf('Gy'-hLg~ 3dbT JRUPg++f2MJ?vT_lbKB[9. 9,.m޶㶇Wr+JRӑYw]jwˎGWRr;::^N_ʱsNnRL@W6y*FU+Ç_?t cdpOQD9Rh4AsLC6B#~$}òzUOۻ?~PG8DM twtXݵ/! 
8{0)/6l"w__G&\=&]ԮZI]o["l+$\$EH28E cAE%5e`RAY<ҷծ}p}GO*~*yRW[3p+:ppz˭ &{P@Owc&-cەviR0yQH&^@Xgzꨌz]JXp!-ohAL ޢZ ߩ/[.GpNww#?:ܛXJM({W^*>.50PMqRAɻr[[g)f A1XDy zU ^cAs_ȹ]T6J!۠LnL )&9y +x$YFJD%\bZZ-'\os 5jm--3ڬ{`^E`P"X v|Bl_C)Q_`tu|\:+֘{c!,t󜀅ڙEo B3nYsBS8D-=';zzl[S^ 37܄EE<3nŒZ ځuZsWLͩJiZ{c&S(al%_/B6,r -oC9"t!K= >Y64trUӗew߭ H޳p-Hr_Oaۚ:|Իפpa%T܅>&_~>s:!=ˡ%Yu۴Grz(F:^]\7AhZ1Is&̍4',FѵK/yɣ5Uo i[Ru' Tɨ"d;2V+ NNZmн>9gf7Qhq$cnwܱ6BՁ22D>& ZwJ0A)&Ba+!jU ϠBDH`sČy=Za)4PMXqu%%V(epxi;V<EJiGy=)5cls*k␈RIÄ ( x*fO?U=4!?L$M) 0֎x@t 1<0Ҹ?blUq̚zLD2"P:LH Ib-i\`v76xo&}1YgE͜]4Z%ԣ@QӖ]Ǯpx}!KM(4BU*/\`8pW3 M3. ep'O6A!@yюHjj|ƾlT??  ?Ex`RgGfB`;|7EU6eu\յԓkK6R/G욯UDjU%.ys5u:cq^ԆR5%>QzN! ,jkԛ ,X7C rݙ: tRaX'{myq^SAE"E ]u"6q.8*J!Q qrZˡ0qa a 52Řaؚ#Qh}=~(t0.2usOAƸGKۋ| õ_st,v;s4:X1z[2?tHT,1ڡXw2{w*m nY$ItPUj| &oUcD *y}e>-ihdW U=S:6 ^W3']Ή3g(SIig:-F'y1I2K`/Gľ')jYBV~)/))z1*ڐJ *I٪R] -$}IJh41 %;HԐi){uԺUW_!6S4M:Nj(U"d- qqe!~Teް,73< <>H%0f7o*WaxQT l`>L|w-mʱ=3*/RqdsIR=/)"(m8t|K6tRz2frj0 E})͉@mne*-/yZ4oF"jZV+N߶~ǯD٣N13Uy 0 c6')Ϥ{4:_{A{ӠZ}w3|8^)IOQYiT;;{u6 kmZ߹8סdjh_k}1LN/5iOmRZ72]o%{rͳ?O5Xwh+}ʩ):r{58De0ɥP zt۹j zK̆S09@ PN* ;Uc'8Z'qT3p/YY6b.kN>b&B91de=.Gϋxwҡ/KqXMPS71'4ZUnCTF&˄h\nGw>5?ou~dmEtbjek}rA>AXRp®zoʓQvbR*TOI:2\o,@9 ^l7Բ(ACd[kN+-gתD_J^y{RE@L,j]U PE㬪NIBSkB Ic=ĨZ(ÅF\OIm`ϕCde3*|a7bg_/B+ײ߾;77ޔ[M,H]őŴA|M幆?Seߖ/yg*~7MN'kMFnVǍsO&\?yU鏋y_rְ3\؎H*muwuoZ՚KTEC0)5DЂ5)ц+ u9N- ={@n/Y#cz WSU T 2zv|ȾXkoBRkj3FT̠URz]SkS֪RBP(θ84MВa%ug<}Ztb0R1*[" (AbJ$bk&{vgxdӱ@IL"a"!DP1rXJޚ=B<6K)k^G56r(FbҡrD^TPG* 2W2%ȧ=G­tZRmW Z͓1~p+F 9%rQ+h@t$c朏re PX,;ʅhc'dcW%E~߹y":lYmˠ:Hׁ4n]<_u _ 0h~1إ.I^^nеËQ1ʺ5zOư꽰/,(!Ց UƸAjyS Q!LvVq͠;:@#z "`>I1'YjE%#@Ui-! ę &ECQM0 ,'K"WOTA U2&BiZx3{:k(gXe+m|a&y9J'L7/URc7̴ )=zOOw>[UGzM'˙x ,7QΓ͐_w_snv{]dЌ)}l69}h_V}U˫%Zvm6Z!j Ika_1LQ=~]Kk߲OK&[{g?q;r u%|6=NJҧUQ')9$ͳJ| 0^y$UN:ǚ٣)ksԡ找xjCpJZp+]Zӡiyi>KܵU=bXZ(s6W2`F W`ƒlr9R{Þ䅫kvHCsWWQ/M0>r{hҀz# zPoP8xoSق&V/$[/Ify(N5L|5b)sJA}#;` P%s1g Ԇɸlm{n)p&t1CGw6Ճ?>\}#vG :CBGp4ィ̆Ī2*ʖ@0JV[os S! j]|tn- !Ҕ=5EDPr2hN3ܺOjŖ(|xdn wύif|V۶&T!|dHj|5AlU }{,%&焩RJwBhVo Vۏl)s;(yߎHAy2{֤ؐBk,ƇĈJC EMY:p͌N~Z!eZ([A4ynf8%DlTX׭o n,aR$z㡿kz{lh9ύoljM=&6K-! rWgN%< f5!jR(spy9o<^", %cٚI\Y@$D2ڂQ<2 ']- җ¥8r[>#hsImʙ|նf5 f|43&ΎYͫ̿XNW̓J|‹ޔ/tvkk |Â7eO7e$ei]}I 2K{Mi3osn՚m[^G ԪSB SN Ԣ!īբUmX_,IHwx&E}P[HP݆\bYlmg'!cq9.Uqƺj;sB"_Hɥ'MJMjOX~eLB7Q|jC-UA`] oG+$;R \8epAE"$ew~3C8ԐIkđșG}]]]DF^H 1jB,ӄ]E43-{ND2 I`RjIl0$,Ku[3U}|=#tqk@ő`&g`>*l4v=+Q^lFB55$jkJ<jWSnjކYh\xRl$_wu.MV6:od]m z(jp^Jsy4T48fil\5^  .$ݯoߜ߿y{:?߃F=0+X\jDyoK^yUUST-x:Az祐$ ganB(=~8tzt|y`崑} a4-3򙟿b_\i"JŰV . X?b \,+v*Wxթο0 y)oY Y r)h6Y|{Z=N󭂸(1a1jJduDAzbnR 9t~=1|Yο^%"aEEϪm޾tsdz&W^ͧ0gKH{}{g>ds+<4$ϫߎ7Qǔn;F?mlw)` =I6w__M:!Vnl_pmvطBEb\$gSd>FDPTrX8]VRZ'V޷zMR?QNwM1H G#NPH 3)2JE0ҁ+F#tY4̮O6''P݉s{LYw=DCF5fU4>o}0R/!v-2}3wtV}L;R>vmRzuj*ùӺO(#B$[قH2hK.xd["MQՂx^U(q"n ѽJŒRٷ{!@P0t9({SCV WĻL= oj3ucۈ |˽e=W2VkƤ'FޛFB0H#S;81!$ҝaqZY/c}[߅ⲍށF4gLs< bdox/?+$=ul )՚:ቓHdTG.1 Cqar椿7ڋEӱݶk17`0f6:nwOX;}==x L:1@ ;9r@ #£ B˫=0t'Ec$%f+ KQEhwH+ΐYo{Xq}4`ɻfO($gB gPĮ 8i\^[p0J_(iQM6 d =$VFQ۞>,ء-KmxREW:&) & Ux28N6A!.H0=rqZۀmVNg.ojt-Rd ZQOUfJp9duu(WAnZA,#vɮyr|HwcXU'ny.دHto\ԎRR2RAIM4"oH@`IwXԖwX6X6Sr24"REIE'vBd-NhlUh|d*ٚj؀0x7Sުo>-IiҐr^sݭg? F{ x]hZ&|ʭGJXAk" )CF"E ɯ/j_v l]p^`dQcP28X3$,pR>F7(~X2qq0_npЗ"xqF?uq|{[ AeqL[)="x$*ff.0Nіyb;#Xڶwh9ZzFOag.gWF,mx/0'dN39`Ѻ:XaCQ^&:?-X<PY̙C:<NctfW٦F7z6iK['⩝Xy`ܥIb {ep;oB!gh՞:#,RFXpڋ+/SnkKXw. 
~mLmzsF K >GY6tr}*_7ؼ}oF5۸Iun3n7G[]N;%R&=ARdG)F}};ҷ^0˩Rc̭5xM1z w(p"f3ɀt%pVCOԢOS,$ckTpլ+pJp$\UUVC$d=\@℃!$]+Vb~pdWs=KՑDLGwK0njMk.qu\(!PL5X>֖+Z7@J@5؛j/T)t..є+h﫩'±jqZp00!~ w+߳xV>708;<$E"N>Bs4^x{vS"LCutG=KiE]:PT,R:\%qUgWAZɡUǫ/4Cb9\%qW)W9h$%WR ^!F6j=\m%j;q|^JZ)d;)TW=f!!JKJ*i9WIJB{zpE8v@`!WI\W cWIJzzpE9x+Cp3J8IZ pqYq \%qYg+wTu 4\ZwǺ#DWIZ"pJL-dR dڮIЛL8dJmY(L\>A%풩3ZWuTKj}HpM58]:L c!rB GQ4ȴEn_R[=ϷATc&^rdEg&Bmp?0F [3]1°s.`9&WjvGnWrR0}A`=XQ`穎jahUaif/=~\]_U9Ud+JA=xH%=cgmI 9Zr;Af^+owpOpp~G u^GqyiYj:Rjsfȵ1O$&zG{B:Mv2$ <QxΘa:v>qNz*$谹 :4Bz@\-zf:* "EDԘH\A5 M#18,!]n>ѡ=3ȡY(IKLc1>Н)&8IOp􉭉0}^j</ {@Xz 'IZaJ_fe59eWQۭ->}u[YU&]0 q S=UQ'q34 3Ei!Tu+¥&LIQc(My>AÅR5e]iMu5p4qF֠ s69C֢aLDOLs"Q,r06 sPSd>cGdTa-XbA_fJE"JFc&Kc5PKʝy J*XRƽQU#K#XhBH K׊(EB9ut,C} r]{ՕZxXgm% A=&C%ہB!ڥ+, 0Ƽ3FI\ڙ@$-hT4zF[?}3g0\z>N>^{)Cm 7T?ia3{^ښKawcny4uS66MPCz Y@SJx->u?ZmO#3ẘLA_>,ލ`zYv4]3wk pS\Q69ӚndoC'8﮺%uC_f8 ndIZgk|b[{N+^Yrs _̟ڿd&ǰWǫw&.-0WQΧ ~a]d߼3yKmTͳ,MinOml8-P`?ȉR4gKr_+L>Uvh[ɱi9J+fʠ FYx`U@JĽ {<`9&rP( "2w9lVJHq#"ܗM=|'ku"N3D_Wni$ZhZR$%Y.*ʧى|ܚ8VCQs~)p#m}by;-o)U;?O&sra`hHh&C@C4WC { aQ{BJL1K\Y_N?7 u'?/1J nE䒈SB \0(֐Ekx&sM wSZ*ԺK<5э«68q eNFD cSF:Mɡ+)W&Ǜ-WkF7+aV s )Ѣ+/b?] MLh"u!"Z^8ly 5- C8>UW:O Yܖ ֒Hx4)\sœN kq&Q0F`Uq\Eʇ tDhwa@!h͙DP̙(s"jGsַmMxm$g@w)~z >"?Mt?<~?*!8W[Ԉ|~"mh,g9C1&*% (MLUIzy&XY@4A Rhm!e @ZX#k ^~2rX3N@(d,8ƼMA5 T*HC LZ&nCh7c1*\s18A :THY"!N@mr-Hcexٚ8{g_&+r &a#[~ڝ]BtB+xSdaw}f\o;w>qno!6d)nmBۻν5g(Sx͉غEz{8Nm{+C}6e-Z[vk=z^j]&n'0{}t~y5bq{mGΩJCgٌ)H6B 1VD`㑮n7 ew~Q3%""&NEUL%9y %2S&$Յ$`ѮY6. +XA'ٷon\, l-\۬6SF}钖A*]gR-A z..x"w͛ a$FTHBni< Pu7̫88<))JGן/M gQ>!Fm|V)Y:Ύ.R05q(}F*{*ɶtP'~:~x{'ӫ7߿_x͋_̼yo^W_pf27 "*Q"($y??`h8|hnC+SǸ [>+_lCw_wiU6h*#9(r74[va+(6.*Wj8R TE*;/+ʼk{ 3KeQ7(^ц[9`!fc,ƞ]O<*{s3 Z%ue!?a'uۋ R|m2wb?V-Wk8|1g=U/4ר#wq|׊r%Q ;0t7ߝ~<lJ6ˆw}b0&!xeM瓫jg{%[Eњ}+Y/·"'f}regAr✂D RzTRNS֡KmVR̷\=8A:|2r%")DaD2(; 4\V TvC*&|Q VK {ςZ9 ٻGo#_CJ|X>׾G-_ǜu>>nLZ{V>gIF /A!+u6Jsme"`) p)!Qs{*=sy㙏61`:mWX'5Qzd2o6xoH8h,nu ]%/œW.CZJ&{rM:S9Нc΄dXU|~b2ZAJhAӅqc`L CɔY=:989`4cVK HT)T h) @($"RVEBZ,IQhJb#D2AX!(+H !bk+%b- {_o?? nRo5KƟ™t[}mcշ ` ǫg«Sʏ=k:`^U=Oa"y! oe hV("R N:(yG+! PGz mT"_ȈKJBX;"D\)Q)FJr5}p릃ɕ3CSÅ^;^3!ס.rf4pG˻B, \>]Wè2͑2޼eC{ e2*X?˙ b[wpqKR!zږ#{=;hYDE O5KSDruJ,=%WH Z OEZn)nڠ?;#-'+*Oc9@u2 NhR)h8cHo3hTob.j00(KJBCU9T<(HY`}7GؓJB*=r`߹h.Ɩ_)"y)v϶[쮈,Q.8W(Olx8JG3y=)@$.A kDcBi $5s%!xE_NwfїuZ4}G!]B= nE%Ԇ nt [/܌CR*ʤ74E,7[ z:z's$l4F4LC& ތ?܌B{ZMݛG{䏒GB9%׃WqT&L{,;No)֔7i c5c YQy=%f ۟Iv{eν?ֺ5MV;#g26Oޓ!U(CjU%zÜ4~\fvXCV=¡}-JN⇒\we gvnx. ,&;7Y?O>Rerm]_7u$4%1@QΨ>{v+>VF[SPٚsy|}~_2~㛦oIb1ibW?ǷC7G[n[toBgMMn4\cm_5솉"Ի:E-VyF}9ޓv2|_[>w"ѰpќNaV{.3l[u9|Z]d~2ܝ,xjYrmD7͡ћ0@C/Kg@ęH:xT%e&PLbN0}ڱ!e ttW`xoO>(ʢ{nUFGO T 1 !(\1ֹ[oZ1zY},:sߚns| {gmvVu‹oZs2xROp&roKXvt>6ތGň[?1(Y- #"V]OGߟNjMC\G*-U^RT[7B!:^xCm{?.}[Uɝ&fhƀ}7`ky>ҀD, Rp MPڡ[5.mI)Dۤ!)\ԭ`T&g:ڈ[]ko\7+n*% X`2Llbg"D<-9w}[e㎁>,:,&[bqdCpNAՋIG48G:O0W}hk;>R;|䂧X|YtHg_Vޭ7Bt=O?@#6#~lקUueYoy,攈gs1z-XU@/ =>G;rXC/ݟH\)Yyqڻbw[G>CJK?rFdq_]Wn|X/T:YmilK!Xf > {NWm5DxD4'%*ni}BL^ws֫nB{aMJҩ٥1 "&7[nd|^H'=I{]*$e*3F8X;YieHjjj5)ꭧdp 8 nfUqx*d.#~ݰ_ eTysqf^*)7 ׋?`?4i&PcuҢk^+Xg'6P):6j.j"S҆KّI>H%ആev`{i0Ofipv3v|QX̽aYTYY`\?ۡL y&1x(H|W[$hJ|,!22eB-kj3ŻbR])0-٫͇kSSr0x*#ɌxdKTc 3hLh9Q\ʝA!p٧P_otsye|`x&LVj`K曇i8i HJrqdFg7#SCWgM }%O8!m],e>}iT>|OÏ@a; msx ~N(6Kv/;~DUcnW?n+pc4?6O{+uMZu_xsdg^hsov*WapYۿ~Z2㢽 a(Yۮ1.ȼ! [hέl-ނs]{vY_"@ޔ {֔k[^xMb6|,_a7{ܰe7uvo1'?^aM7_pQ7WO6<߹~zq!3|7{jt {{[(6- -ԗ_˫g䮷Q+. 
] Z1NWG E56] ^p0t=] wbHW]ZUw@tE؃C6}RHW!](]?`Ϣ7CVe (s+] |8t5@xjtU0c7G CWnC+ՏݸNWe8IWwk98>P>]y7wnmlj0Ý:>1r&tf_ ;拓yZ~>їD 'w48 `4Ҵ񐞈 wڃQ-zROD$t%gP\/'&']opthç0Ri@Xy|tAg ]q$tsGk(ٖHq`RP|{7T.,V,nQ[Άԗlјo 7JWח'/s<ʸ786,;S@_^voƘW1~w KuK{׃ŏ{6j{nds7'MOݷ#wKd'{Fq"yy;{?Y~iQf+u/|sfD!֕rط+;dmq$"'7#$l}Vdm}Lq)9oz CU U?߶?n3<=Qo7p9yד-öb9Iirbb+ *Ż;5u9eETLrTr\:J(cE{Hs *3 X )%$ڙJ/3L$Du>PT^dyP)Ԑ-hCvLH(zSPX1s#OutALmYklwH! S=ԆR ilI2*M(VJK`WS!#<#ݫEg o0N#4:88VCajD(QN[<ĪhJ|Y^Mɖ5!1!%/@\K֩l0۽`$1;dsAL(K%C XeãB;`6/ mG;{Z*zjZ b@l+#7CqVrE7(cºo%a L!1C"2%vsR)6#X4t,,xO`h&AC\KhSq(EGB`@ G>$D VQ rnLzYi{s6͙(J892r"Xq*Om ǥ Q J Ho[}`[jƷxԏ™dlY"7l,@%1!# ҰS[ck!z4iYgFDwAY5-V̈KSΊD|1&PTamaBZ!pb$Cy6&^8-!z7,:x ުNz w zڦGka$-A7 /- , >7!똕Ls+JR!ˈc*,OHv9!,SBgą {jN$\`2*#Qcvfd:]}4|Oy LC yN&uKe4<o 14X0UBN5~d}gy)bNLc7YKaVAb; ).ٽ{ˋ {Iw."Of=1c̘VxY!A]ڐ=X S}L! E=|+h z.a8j,!hH !3dЎa<<[^AWHXvmQef ޲gx3 2Б5{s"8T)fLf_4A2?AjDao<{qUg Tc$YYf RNEBHOt{3S[نI&529 j3+ioTʵr2U{Y"ESn$H@}AoHvd fƃ!Hm-e@?z7u )6}~L{u~r$ C .M!ݤ-،ѓ4 l }O(dJY:Z57`ΓE)@k72EB_/(NړjÁwàDl;$IM+9P.Oڪ(&:A,WR w*TP=`ǰ6Ð2:]ь,- =>1r77 3,}BLw`E^qH"rm6n>9Hc|GAʟ:CAWtxBܰEC8#f{fD 10d9T l5?3E:ɮTF?i2<4'iژ'*RO kjփ*fMJDxf҂M &uc)d$#TK!Ok)dA^Ɣ?jޤ jw&*އY 6(- @P:j5Ea2S =+υiq#f0]pSC3"8 Nr,Cɵ+zAoA.eAb?5fSM?{WǭJCX ^Z} 6r!75ь2c'ߪ9ZX̔(rfcUyR"40eW9@I-򢀇`Df!v,2h- zi[p^g`F *70&oi`ઁ#2 _&7LSi,1)((pD#z-H GJ "%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ Rh@1)G pU*6@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H ?JB} +Nc 5R\ z]PΚ,@RL%%7GpFhp FF¥.]z""0ױ5:KD ҕTN]`l4tpt}+DɻGR"+-B nw(yk++ &""+Ϧ;֊t4G2k[MEdF #hBX'p'YXA}s 7rO,Eٛo= ihʋ jP;*r"5ֲTiB* SJd+^J|RRrhùy)_<|&zlV#0۲ Ԅm?~~?,Fy.̗+.j$B1O7C4)HHrksyw'KQm-[2:Z[_˵I5 V hH%ւ 0OOr<Ljr PSf_&h×a|2jlڥlhU3S+LB%L0鹾Nyh _s|G Ci@Cyִhd}m]PV{2e>P= &Ǧc'Tdv. |6ku)ml%u:W*W!Sה?VՔx2uqPBJp7CaLǒ^d|# ᙯٯs28j_K&񁮂U4׹O+AJ6n&;ZT*z•k3!F:@Fw85J/+J)Teޤ>+w1̪JهլgTfou%j^9t+YzUZ8'2Kr 兏g>iDLiQ晉Ugu*L{و;Q˶EdNZ֩`֥ѹv5@TˢfZAaS :(,޷8Ḋ) O1,*!6lͲ{^2bZg&AkM,th-}X +yǪL>Dulv qXL7[w]uB lJv+Ituߪ2"+̍<RDW{HWB1c+ϓ"]\x,th;]!JC>ҕb+ +q}+DȻGR5<B>] DW{HWZkb`6B" Ut([]]Xg|+npe4thOWRK=+˜t T|gNy~C =Pvt44p}4svVpwFoU<`+Dk{o(#]yci[]!\#b+@X ЕcիCfjv=]ulw sNhۑw %@WUϭ=JvruG7=J8>ҕeŪZ ŷy%dXISͥWv4Yq(Ln!ݿx,Jv9<{?awv=`޵BQ^qVg`#fRoWO]\/3mwgt웃?߼yMfAn>,eQ܅*yHv6MgBW*]s}?7)ɢK}hYhEXj|ǫ"!I8)j[I̚2ߋ1rs˦>*gS/DTKo &:b:L'#+/6U.Y!r+[b}O!'ӕJhaUVLU+]xg9"B !2d 6(0wa7iO'Q`nw1"/*s\5ԤU)`)rUd^^P%3І'揻B?Vt$}ࡄʲ( 50X̫ȡ)/d|!m!վz ?Y+o2eYja0-Uf7 !y`4)d6|#eIze/ _/az|f򵽥;rkEЃf~оTj3%[1ƒIu](U}3sPx$f>JYuG/uTwj ?f:(U>s.U4.Ksr:X?|Π~ڀ:w5A%w>!'KJA?.u_a13w9w#Hqg.TѵW?~˷J+ :f9 Ls)p&,.9"hQ?{WF OIpc{w-HnK$X,,$RKRT %QҐ4Ք({>ئ9ÞgzZ YDd 9J#37[ 1z`I κ*9VeiB&Dz7r8DkVi)&3#V))2*AT&GNJ%ޛM?>M@SqNP#ۘ}§|Ks8]4W= o[ 'c9γm,̂4Z}J>CzL)qV={JnZa-#b׼'߼ uc ԍr,4>g W13z1)bUX8"VyDERLprbt\8hFCgLz3wGUZs! SdYk_n7Yeb:@|6dὥ?d#B7k}Dl{^4E⥙y5u$#N5!~|Ұc1RWVpǫS :|,T ^&FEڸEDtFGd8o^ʑ^?L !(^G)"`F"X1Qˤ=!0c)aJFgb;)E1J\d#e'^9;&Ww5r~Y L6y^lTuX=Ug{b.b*B얦MϮmk]ȍ-fu݊wA]JreV)K|Le 0 0Hd.2i03lfΎƹqJY9YD M߼LO{e0QҐ>9MR(H>GHD3qcr[ +s;B}9G.2G:u@`,m@m7e NM2bvV5muUy^6P9)7ܨ6^XɍsJJ͠wSJ52tr=m_pnaU*S7O-xCkKLFR:DԶlL7*UL4s&"G^SYrfJyqi <|u&wQb'Ey&E.4t%W^S+~2Y5ur}ۇG=Ogak<qO; GFpDynOSst*aJHL#%'X &"DdtzZ'ʯ*)FY4vpiuGd諼s'MRGsϺ􌟺`_/S|Cީ?~(?Y7d6;uϳEKg<;wh0fMWн'dE+vV(`IVkl5bmwSI[r!vO@L!7cX761URw#! 7TRW- i~ɎzMnl ِf̟fE17'-&JPWA>vu(3vIb<9.Nc6?s>Ni8\Vu'7izz\ئr:fAT&4-2jNWj~G}:h'4osnod/akFovTX\5cn垳_7Fodxx|ó*TtbQcB"G˴J%:őe ٯ/kYox/nq6=[gU!hHΪ$"gS`\*A17B+M>|Z*Y CHgߠjcc-vw/xzTtyyI9MUGUoXuzX];}*g| MB]ݿxnhdIg'SL(A)+&wNqu2*OjliDKӼdZ|c_6Nzt9 AI(m1lstɭmKmnb52%+‡UQy{ 3cHO`0sQ̼FdƩFl? 
Z4w BHpg-*޳sq.fAO e ^DLЦhȹV j#c5s#c=R }PWB=bknׅ$@v:UEpEX^,;;٧_~i&Q r ]:9 8̈́ZT^hneBfRQr̵ҕC*FB@T`+ L49BA"89̕9Ϧy4]ktڦ2jGqQuPVќD/& UbQy΅8MA[@,WCbNsE21Cȋ2YšX0"  Gh7{,S,beVX')[?eͥW#.}*!쇇-rIT |oKhwH_)1mck_|ie/)歐g|ZO]r'\:SWWNͳ Cӌ%zgnBf/|> h2BC}S3?u=pLnêЛņOWoI̤k-Ю.zP7tсwt?8ܮXh &tn.'?yZ>y?8]oM>.'=wуBOpJI TϿV,6FEGaB~TwVѩ;#RwGmCr]Fmfh>D$D3Jk9zAq@3T5:C=e ^/OwnC_vj~ݚUt*+.r\*.FiijkQf@u@ u#@t'%/ H*V{ɷ9g<+lm+l=Stab N L)=4}'7]乞%X2%/J`C=!..yDz?^`eKK0"X6\A.TA"m!V]kF+#/Mvӣ,M$*14CJŶ ]U qEu$IaDs2[畃K[>i))|IsΌ$C3ڶ#_7sx=<J z|z3.Oss?Q1PܫW:̉҄S̈́YᡯBsk"F;Ed B{QYΥaRknex.4s33csAeP>0iU浥ķz585& tR\P &K_Pǵ1ThS e$HmbLigD!RلW4.k 塮ITd95ah ^Y DaF.QʽTtqw9s Eq!3ː2WK2&@G,CSJg0ykDp`Z ME!TocYqHG=/:H]H&T`TB !r.f㢍^gYZRO"͍aN˄r/RėQ^n!}ŖY_]޼Ȫ+J{D.飹TE*R#dVgLe q`$=^\~$5:nc`lOaݐyxiU=52ȅkF>a²#7@[O_Yy M)9.=$7kke2+ u4#| )mARy gUj/RFuϙ TAvm;Dcѽƶ{pF{e*TĦlZ0ʰLsK;w\g!B6&ꅼk-J]c%g1ndU.dU$d?dUq˖i++kE4@Vw,&DY ~zdM?599]5٩fp<-]5C+O4g 2ݢ+ـdOWA02BLBWVt=]!]1e2BBWp8]Jӵy{,t)VDDWXp ]!\whkNWR9ҕ\H]`Iu4tp]+DTOWgHW*Y/JW؜|{X3,`{W2=Е8+]`kD4t!]!ZyBO#]am-]`< 6"WÈٹAFD>BFѮ4i,i0)ULA0UBWu~FQrҕWE#`kY,te!%=]}1tj^ 'U3ԫ'fh͉6Ci;L5+վ)&"+-BBWؑt(5=]!]1.)Wf,B2Bus-TL FCW͚+DyrOWBW[cXDt0U$B+Dٵ9=]}SE"+|:_2MruBO#])>onPh ឺW3vsWHWUݥH;d- ڍѨcG4kL JOh.f9:Gլ 3IIP4rND+;Q~Y̗CWu Dt %]tezWhhDt-t( Q¹8yfp9w(5 S:j]!\kc+@{䶧3+T 7DCWjFe,th:]!JΑ$#Fǔ P ]!\whOWJKz:GUo@'9O߾Nʢ#҂L%%gB~/'oЋo#W`Ju}]*>K/h*2gRH dڙWW/'?ndCx|>*hW-Ҳ/*Ǟ5^]B)*2[ KRKm\wڷ?I.U*FVq1a.c <їu}i +UPA)UW35@T'9!oDXR- חJ]])#vT: ^~y_I@9n6 Zv@C'tA^Zʺ_6rM`W~EgxDŽAlɨwg'+LUկMCL$Zz7?I'7fbWi:,a|3Ll 5¨+b52_Gkn[L UA*CyXF !h|Ѥ5j"D8sRhH+LJh &JyktOC~l,-'X;im-DSVW>{DB7RO#2 EdV>ٷ:lp3tIR!S4NiٔsOP!Zo{K6xZmxX; "e1Yv3 [9&[׆Iy҂7L~mKQi]$9_J22)Q2Zb}yyE;X˷y`rY[?쌵ѥXd@5J#IyEK&J6ъp)e"e]DdZ}9vq/)#vKJ >lKMwVjn'#Yc5u,3kVϬJMX?v3k+[Yd2dkR)`MWSI;}du0\EosHXlrOn$NT/Xpn0H.*G-jVh kɃ끦U*Uv[WコիW,NK+7#_Z | Bgbg M<. 6iK{1}V+>^ƣ~ƒZfoe6p{Ar ^}On~lK+bfa XCûi~̯j0&\Ik Q{lGϬ>M-%FoS (;n|) \‡_E.-Ff<*W28{\`ps{B<&Wpn 4tr}7 £_d~R?F-Y7Km^S_]İ{WK;QW?yZ^լE9QrOnqy;썻8ͨ3'eu|.\n4woEk\wV jyEU^]lBWGwhN/dYTc"߇j:}b4o 0u_1L~ ^tX\χӡՂ_x^}ۻ0/$vX\Y3+l~  ²ERz++ĤLzP_p 𮜆ז7}7I+^2YN2m  V|] u) Q=-Oi6Л2t^vqQcom?xl:qݾmn~淊ǙFjs*\yk470L`@^r)r2krLgui+%W YPpj2xrTAU OTNs;ùoql<2c AiO[qvs=vv//ue/vmQg\0}^{~3rg8_[o=ۭ奦gyʋW*ާ:/hA*%֖y- _X lėZxUE7'Ō+qhR)d$37:BʂE)A`fVֹI ?PWN{έ^?; O[1j b"AHe?>3Bx3o3?OZi{ vvך4]U%?҃>xvu v{HVvJH ~'oR!ׇ [CS`yT?yP Gc qH-HO~Zɧ>n}z!s76d lܞ_oGV#dK|BÉ("fLXLO{z3~}ю EF>A84fW)'a޿xuNj^\/CZHkrqdoS#6?7tH6G7:_6]h5{ӛp9?FmG]=_ )SrR FD|&ؤpQE ɑ;P6WG _E^^]>M/uw7bowfg9J.pyC6cv>UQ6&|/>ejfUgČ|yjrqBV|2kwב]/cz e!3 % SYHE_!D \ZSVC8)80^A-ǶqTܘh7*>%ȴi)hδWtV<^5R=֢DvJ%*С03ZV|AzbdYhAE85?'lʻCLbbEʈ-pV9-c* )Rj.Z5FQwr!bT}*UœB)}NN:RZA526g72*Ͱ8 mc,#=6;jyU@dgr5zAˋp.b/{mf13J*BR6+THZZ6QY5PܠHk"qäQiUaS;OL /LD d6 "`nٍq6M&ɠvq(jƨFkig45oX1T_ QJu!F$Y^xȲx4l!33h3֤,ȩ9&,Mب16g7FMUSAfPDя8"e!Vd؂+Qtuh)X St\2ZIG8o6h y7T)R0el$"Y!5zin:|8YO7%Nl1.G\ZTsR 2(PAu^&>^`'[?+u4#.OC!GV%9r7E,tUEGLяZU>q]ڙjWɛfBʗX8X3G%߿U8 w9K0_?I`s,>F\ĵ+קIqIxE%E/%+Lսt8_U|?@* Sf8{Hsi׮&f>N?0zlX{}7auLV>c7P7g'+{Wb&ϓU97 _'owo'<'׳GdM5ߟ}6w^]-/<{pR;mm`2)KJBTTFyKM q-}j+m}u =%6c; ?O#y&,U@6^紸!-l@{(:aAQi)rB#Gs,'=Z \OZJ޽;V/Wh.-ĩ.XpZ_81_ B8dB9 ,I%@ FclJM.E 8~I^p>t|/\fFhGL <&̃=uwX4&Nn/6Zj$JvԌ 퉢[l=+eӈK(N( )"!"@) $ӵ )Hx `ri1lZ#bl dQ C"=y ؔvi M~ 1ԛepZ"PxjQE׺l(|H- 1|cSNKʃzJ'cJ|:K؄!%) 2Hi䘣:j/'rt>{O54j`5v2}=J'ޔ~W>o_|ǿݫ}J*za.Kj+I{ @:6| 5[tMNе!z>;kŐ'c_q_,e_oN-=Q m4VQ^ `fϛO4oZT$ -QMGVYL+VuO*ݹ g+;J!)I={}y:=ocA=Gy47|͌!,jN={Hf6^I骷F(e?mt~{y9{KbH-%ldzU.?Z> t|-T6 /k~rW1;?a}8&:,w&`\by3Px]}; mzn_q3/ٚWݸ?_x;s+aRU!՘TKDZ}1őeNZbIW4!"µNgm>Vbǝ|{^Y хCfB)Gd׶C5\)3]y,/RwA ˫wRL`0طbwTw7B^ݔUՉ:!N?b}3JE:ւDZ0Fv\,IuV9U }otx{oëTt E[HQ+o|6dr- ԅye\ @rJN X)3tA-.*GZE/g18#KSGO?_|O@ʚc' Q 4H,U<8:G9U" 
|Vk$ɒB4NZu9zQґ89r0='^R;.6Z.yaS4S߆zmr*:s80^ʪs,"Q[eXJLmȥ|tAw~# ng%yQ ΆrѷI_W-OO iO_8D4EĈcډ4UqRsb&VMUJ{rZ$-g'[|7O@Wie;𔽗YTDP̱=uUKSu2DQzTU)ێCkܬ #8;x{˓Woui8K +>a38J*gW4%1sc jZո3GyH%BZ()^9QN:.lLf*B *䓬r-%bB/%G-)bR"U9{[%,Cs46ϣ Ύg[ˋ&q]Z#ÍwhpY"輪l޸Kkyy޽w]!3ȭЦmVՎ߅&7Z}O4^r\=dz7Lohty fNĕc\9[nOZ^]|s;vv{8?ۃV͟OnVfzC9Y _fGwe'4tW=_ Vg~/|p|svh)zv͇"!4[~ķÖ& Zv(د4]^N]0샡 ]5;]5ҕ:> `\rBW -}#]=B"M+ ]5^?Hk!h骡HWZҳγ+eABW {JW GztՊ_Ys@t怒UC˼tP #]=2LNzu0t?jo(^Еʺ]Xi@l^6 My긯ۆK~u@n6op:`]钸Ga9a\UJuTv}^ܣ1edrlF; Kf5_\?<4Hyd.a?]-ul?߽6_%(KHEG9*J6$bLqA$i|m=ϫ|(z?L;M[Gh/)߄V/_VGҝLruWjI/^WnmEIeh" dsؿzCG+12$" #+!ƬR&U fلtalɊp,p'\V{'r v"ע:m 9]MQ#=T+sʱ7,VgUdPR\\E=|E0tlΌiJ kƬPLI'Q) ִ> -akuQ,e0hQ !cn@\-v0 ֤2CdS]n8SDëBٚ|A[{6_ Wj37; A3 04^x}G> oӐ- g]HR%&iMFZ1q1jL!mX ;oBͫĪ!R-ωP̘mQ;QU/cPB&g3 4}Vһ!0y)^JNxN8:Bk0!5ͪ_֛9ZC&&BH԰t`\{+Bd/ FҠq1QJ680Z*L6xE*mV$LȽ` `БBV=\]rqۃaJx-MV:k0`-Od׬||`ZSA mg+J{AvP3pP:X2CVDP3$LiYa 郕EJAԙU}_{ "@:tP2Նě q[ |tXduvJr@c ?ONXŸӬ(aۂl2$ `0~Hv?<^Y;y2u1q׆TX>b =N` l\`b6y Zt Fy $$BLFR{X|*(S`jnj<@1!\VUޫfFŶD#[-mT9>pTe#s4qVK@$ frP%VF|d@)CeC #RFdC0"oY`H#8 S.H>K4ԚmNot,OHڲhR5M`YP7o5RPY\ץ x/zdV`!-t@f&=.yq7$%:&ịsW wàDyxBrH9]nxВ}qD[AY.1eLv )0HH:P#fi Rn FַY&úI!>_|+Ip +E@ӛ)o1! Qѭ bS! !jZcx*Gd'Zg=ku @2!s6,ѳdD).XZ#eȍL+cuփ*F((uͤX LGjJҸh 'е5c3(V8/#Pt/m%Z!@6(-APX33!-+Zim@ n4BS1F0%#pM͗EO_5ךeB\2~4U5ߌjajV:m?oO6[H)c7mKBP>'6whhlRYOtTMJ}`?kh sI*R|>g(E43Õb5kYM4"ZWY)9u^ eUMMmTb:uk^V?v>[SRwWxUA_1ؐn*`chµ~(*ZPr.>ZKgDWz8OB+\+BWӕDWzaWWBWJ_vB+3R[uXtw+NtԮ9oDW ]Uv0tU+=Jڜ rBWTNWDWGHWR()݀vz0tU**Jሮ0^U< p ] ~tUQ*Ktut~H g+ *Z*ʍ]])  ةUP hNW]]YՃtSo}ikڡ =D{d WtEkštE( >Jll0yv!q|wh'&gPֆ NgK,&Hצ ےo8VOpz*f>=`]}򐶝T~8h1-\Ѫ_ǭ(&=BJA=ipB6p ]USwR?]GvxS]oצWԻZJ)n(7^ut%v+AtԮp!U`b(tCR0#+7z@t%a *\nBW4NW#]Jj̐{CWvCC#+ŤNB2g%ż%ٷ')><=њO._S };9(o5JY(m (Y9 ٞ^l2) nwZww᫫5:MGޮFmw=ww+z5N$}D?-ze7?E7&ԃ}H.d=޻)Hɓ.S"Kg!S}yV8}~1č&DQu|0TNVO~{}:pa7\BZlCWeN d{yqT~t~,dq^SvǺ.υJ._OG/@'k_]U|}wQO.&:_~0O}t}y}{/r\ݭ;5Xt˝[i5grMFpx;u b9Imij;-z*={:^֌f،?n퉇_$9{fJϋ>\0ۖkgnel]_9ʗ12/vmW,~l\6.'SO@!]yLG \Wߡ%Y7_;oq9"e<0}nv#7ߒABz#nːx g8d쾒ufEZ-?wOzHX?x.buus7N_\\^U>]B}*|9I[Ua;Ž,|So}sgAu5?GPYz^չ5oO;9^t禋MAi7-{zQUtHmQtj*끾QWy{M|ۻ/+ckIߍMU73L.9La5Mcy *W6iK>h7%3@+1J=VVVrs PK%L˅7"(QLb.^}pn-¶Rs~1򵛔h uKҲ;v}-E{qӗܔtvxux~5za| szuKp3~7oerhyIe1?iL~z'U F06KR #V)/:X`l2mm`)f:94KBaVC"PcyD D @ haS+3kRMDlMQH[d1^RLAWbVҤ-- >HEw|>Ɵ˖h|w۰v_?=Zk,7wmε_~^oO $2.ti,p~X >70?G-VW׎xI墾m,Lܳq|{~-߅GvU﫟wu}p~Icc{?1tO'6@m|-mSkUtHV>-?} ` 濌/纝cn|I=E:w}{ ۆ]|Pɏ2Lܼ+]ʫvE;}HWxr@OFddrWSi;~zij̜xin#GE9 ${noe'ėmTT %EJ@d6 I2^"BA9] `H ˗Qe˅!d^Y%i{Oz9Utږ갵MGA[ۯ_vB_wj;W>^رc*); %xL0 $ׄE,juK"t6S}3,og헦͍[dTv3g?o/=_@sX( G.Ny)I'=lql_E:?L:٩{c\3sCnr|I ;%Ʀ?ߜ-W/KI!1wL4P вb<8 *S4F`Gp7v,Cl! 
.KưN2Vjֲͪ ȑ0P"fš׵WSȩ2P!`9D]t&J:rUG06Uz 5tȂ*ٙڙw3!9b&D6$Y.G0FX\uMfQ*4=7f!!c&N#cj~W;x4ӻ[{ӫc^~'!clXQCDQdY6^\۪̦!E%?1D&WHkGĚ ā"%$mtzaqT=nwox-UpSln^11'bV9另`pC).VͅRi6)B6* 0M)5)|tV Zx@bZd)⒭-UiuiLę0&evt\(*lnmT1C[b]W>p2$vjm tC TQS!Uq}Hw^[9OЈ90}#W??\?kU^'D<#mdBl?mfcFnVCmdPQ8R"dkq*ES {5ҀɥPQO,X{~hs7OzvO6#+Hw,G>'\͸N'ʸSOθnrχ -,!O4&; 2DJ ѤV )5c~P.alxߌLLfAhE>h:TIVHzv|ȾXk!)54V-JQm8;)"锵IΨ: ВAZe/mb~~>li:q+,94E*ʚ.Ѳ{MPӀ#;±H*gU1舊 c9N* TN p6 5nO~h:e4ö,~bؠU񐪕 1ѨU2bc:ձ I.f%Cպb!$|5>9_ SjbPlḱ 8YA8OOI Mڵ`r mSRz791e1|9ǜM"X Xq"UP̙|T vYZA(/Vcv6Avԝo7Z^n_zfJ=Mb) Mb헷OھZu1Z\z:8T(0yC d;5 =F&h]F_hKe*C:JiLywZf'+y_[Ie^mqN~}ף<~vfUw{}Y#)ߝwi7j m=[譎KR2R [t_=\ 8iƗWTW HF& N=Ⱦswd4aP@kQJX!D[L -lGѤk8[{xⷳvÌdX;ϥgݛ}=grȔ15i&V21ӉJXc1ljєs9N}j\}* xHF`- TA z v3~'A]Wk8ԙwlazogj7 gud_os&x]rS+ؼ[濝s+PCͺ hqZZ1P{'?'LչwcX)@JT]jb)[RЊt:ru%٩<_uM4sN^z>rcuGEjȔ#G1BSb*Z!$ A:-+?.۟+0q xoMMxLB$Zc!P<:UR/ʛ2+}٩ng}O1նDV$[\Y'2&ĖP$$[-*̐ R)ԭ+RΣkT(ɑ:!կ#`i7^.hĐH'UUfկ`_EXJlux&%佷G3a_c,4(tLM 弐==؇4Oh[v0yF5h3]*UqT9J!IfVL+ʬ?tיbQ[8o'q3l.mnv+|+yUw"doH^Mtv||4nz;VDĬOγl,HNSRZJ/ YJr:49z)&SyhϷ]\qW\ Rl@PvhhHJ FHULQ VK {rWs]Kexi|EZy>g'H B-ku%~2 jBYhl픐Ȩ9e6n,;HY].旴V'ISŇR;9ؑʸGYs)w=k<ݗYڹ;5>KL5Nx4cna9j-`mB[.Z^;l(a Cor2 %%h 0+SN'k*6 HL8.&XEL6\Aىg˝zg/orf3$c2YĴ1*gD̑!\gUbW"Kךfc|s(sci9F 05 O8cZkXp Jꭘ|}9{C>|'"E \QNd Pf H=A xHi4LTӜT x\J8s1$˛UI$d3⩣|'L4G,&^@bH݈-ݸu:/)>5+9>#8ARúGO1bsh(9>jGA KH/6):F(,=A%-r!89ө36;HQ:D` ' ƨdBkcpruDB(O[NiJ+&Κ -6r0-ft`{_W:Mzmɒ5*]h/,N]Mw=UwU/MՖ~Bm4gq/m&Bε!|o?,]ctݖ^]=~Je uVͦB,YcɺWުyVʻ>kiSL9:;o]8kQ<zmYfsj%ZՋw?cv\򼅝ur}◝AJUM~:ȕ>1DP9?G ?뜹xo GHwk(P[D?n5':śD%ܪIj"MS!\`>Ԍ-JЦum<:shCN#֕cϰu^݈wM$1Lǻx'MtRSU9b`_+F/-L)/qrV0mvCJZiV֛^VO9>o2 y ZȽJ/^YZvk8Kizkhb#BA,c@990:NpuGp~l*W(!b_*KUkv=?$ZQ W3Ճea2xzv piS  P`*UW},-]+QW݇,+j_*KURp#7Gpî@PZAԮU~@Tv9I?0]^ě9ɫYΣOqwi0;ait~rE8(Y y-TеU,= g7Azu[46ٗm znA6n^^or0$Vs-$\՜eTĤ(4R&-KmUor0)jI!u^ѭ>E>2/_rZ]cnyz_[xs7vl_rpCzmVsWOq*JJoh02]TG8;i``!6//PP?R)5z{+H:Yϧd}gO)yuc=Lb$M?O 'տ]m>w2z`^K%7zG-dCKy޲Q}'_>8B2ԖS e s iV<0NC?h!صCNġdyBZPUI9 H+kp2URLsG*qh dP "I6z[6ObtPaI 2 H2c:ro%CW0*ǖ*il(F@9P(S9R'%7iWkb|4Ĺ_Qǫ&;:6kRl'؞g%%RlNԳrQ? r*ߟ$EP6QT8i-QF$%S$,%/a EFetB2.ʘ8e=_i  6:EJaTid,&Xb#cS,PXXr|=5獾|h#6"6ZLEFf'cj=I Q)fQ0rg#9YMGftBE $& Y|L YJs?b ̙;ڥFǦ QgFNL. !C  fًm*KêCp}U7<4 xӏ֟ :vg6.〱/J F"0[;EmdP 1>bxqG/j ZZ$VyI]RI2ncD )5 @CP[W3<-(%xvxB&ƅhZ@ʪ,Z#5(O@a^]L5A^9˫ᗯCE8s]^~ꄸ즱uC|*b6j{=>i%a{JRzljtq^!$(K Q.6AiU[`yk(9q!x*uQ @erYP(Uڈ[c!b<3)otjƤWܢR%OJ~ g}zs/%'8fV ʻm\nW3Y U,ˇ&Vڨ\'#KoTaTk{phOln:vAf$eZ43a7Hӧ/_U;/WځvQYEcC'mGji.j'"&d2*s:Df-ˈز͂u|]+Sfz<xaB$1SgBlji 7e]ͻ`yd ;Xawm|t@H5ޘ7:U)9uDPmϴie–bZ7"C]GKmmh<2_`r+jBjǽ3aE(\fdWٵW7e93~>[N7Zt4Rp\SI H@HgR[%6A'T3 k:y XA'9S-e51D"c#dY-dWJ5ίsrx~Gٝe; /ɒzliNhm/[nc+ڴw=YNϜ(MZ:KULsi -av8,)+D9%NZS#e]΀eThH@ W.h) ~g ה!P% Z#lJA#Ql9*E)mH24&|nzdx2b$Gb6Sǩ-:P_1 /gZ8BMv~U? ؜\l8|H cԒcpU _H# ivTU.VdX :[质[J(r 1Fp)9. XDt_ qL HФT`%QI^$!;;!"\־W76 ܁GsϨA&XfU\9+ʥ6]7/"ӋI&_4]h#EVJ^heyA u1 9O-N<1$TN-8 HS'J픈HI55g IAp$hudWҶĶ牝k{:!I|$YrF2PB%D[ :Lvp@@< ^Ar&ЪӚzpwB/qi )(mᚠ:(MCK=x\mo HI=Maa,BNGjLbqNRyԧĠ"TA ҹD?kSScyq# 's(aīXr0n&*A ہ9Զ@ 6Jd",",s<',l|=ӝ#E \. BxQLǷFp16EV @^kf!&,t\G.WKqʊ|MjFl.[kk=>ҳgMXspfY [i"Z Rˬo&E `mTXCHx# 5/%pֱ$IO0z Z2:g'r>/#Fl QRgI̽Z<y\GH ֢B8@ zn.Jʘ$L"^ψO9rkfXۍm"gs:!-t6x1Aμnq~ҷ6}}vuSra`h&M>G>@8kߛT; @}vN0R)RP{Ĭ9T̥txwm Z޽B(ns+jY *rj|F_RCؚV}pö, >F;DžTh#a"&%0SF ܀unCSRp/3$g'Af`-36΄9 # eI i'\YB"W. 
Pm!*NYg<őˑۑ͑|TCRhdK^*ϭWAQsMbAl^V;>29{ ~WXƤ=3/̓* ޤ#yun:::Қárӿ] (&4PHGqf I S[4:pԁィ`5%%M`R2r%O:FX!Q08xt,Q%>"RDCn 438%3 Hs"B,YAo5r#ԉ㜥9/ǨjwD6̕՚<7wd16g9M:J%PZb@80}un3l]YGg"T)IP0/@ @ZX VK^~weg!Yp\i2P !|=1Q1%$D9qo3THY"!(O[N*.9;&aԁʟNz' 5Ayq{o UG[k8XÄs&'xSdb, fL]»O=sSk|HdŲpeqk3f2OG8ui7=3/o0067&w0Vjuԭe:aJs֜lefNŘ6ssҲG͹[GK|AF,\0%R-A z] $x"Q-95X6Knb']d]I`VWllbNGh '+4M"%tp]s+^ }Bgh%Q'9biH*(x9Z?BM*z F`kӮt|7hx- w<[&%IXuhd(6J"IHBPB ⥚DUf[}u܊q%^νm{Vd0%#S xBi! +RF+JJD(QrnqǪ8^aTuΧhoߤP)0K[04Ȯ= y˧uD(1)|II(+L R `ܥ s(f0vi> 4ے!`2onVq#)"tGBA=I4įv^ۻw(௖7߇ CZʌY?L4;(7o{W?go8t7-羺ِ 4Ĉl\Ina0h\2opTѸtd-9>/ӛ,nUټ&VmWFN4^֥ }m(킾Q_}I% JU^qﯿW:_o_L_Wo~%& $ t~2 ?? O G,-Zכ-͵ai muwXW6oa⣘YX6T o{?}vO|>=8kg]z#{B74[vza$JOzb-UJTT ++핞,ൾ饇mˊjNZU/:N8C6ft0η,lpЫYW]W2?˾}\JˋJuтJ _FWV."B}/pr[M*o~-.{-KI yTIC!cۊHy9iBJ&ʨ`.'"0Bg[=b|:EG37}oET.EB6`6Rvh(2J+U|G& oHK?#DDBe]# 6Ƴ@?!(A{DyL<7@dZ>bT:qqdqR1A`GC+RY, JoIʐ7.Է&'v2B_ *h]G!mD= mvghw'%Ԇ\z8^K$j\Z0[*bq)3:SJ{+#ǹD% 8cia 2iE--|$hիdQqe@@ZEw| ( BhPwman`ޛ8,[;{"On.FSnFikk@u F1l& 4%ޛV7[l}${3]Ày%{R-wZ kIÌ^SV|_ef_.݄bge*h;)55&'_Лd49jt{[F1t妉Ăj \SOCg31at#d a bn gKʚL4_ŽYp%0zkUw^%Y83ԧ0LRhZfoӽM-Ks-_] n]ELsGRMKfv:7ӯ&_7+d:^&R%ų5յe^47Qyãiգϣ쀗kj7Q;(T';s^:݂X楙^\] F#gve[1/ru5~k RyҘƞ 7fG x߽kǙ:*2q? %-l6WK>2^$?!M)Y l/<'޵6nkE/F ߏܴ-n-( 3Mbv2{(ɯrd[vFL"QE~yQВuW1ly`_9SښW_2篊gǵh3 wLBOKl.NgW/{~ T9TuņEv=-^H/$oٴ6p;TۦU TyjtP` ̍+2jL:uX*Z!6 3Xz촻[>_hcۀ_u Fz G-QV *-pEF,Ґh&<6qF>tV휇wS ?[~h ( L 1F(RI5C+gk/̈ ^KBvJFNhmlBbg+'nl rL t/^)j~nBecǘ;?-ĺ"s ϭ%>'D0 Fߥ`T .v.J=b'R”3UEEºb`.ڂ6TAw/$j0h1^}-TyY̳r=+LN:9)mM*k{OtA2p ?GKcuԇ}s2j7kIKCI;D w0/ DH*Z),Y`.yV3ć*]p!P6B ½y3*: ˖=pe' !_l*ttf^Obc,++?\W}ݬ4ga|dL,EP{ia1{&K'˃+q?Dh \_^r-c)n8o| SbҹVpb#Vn~Br4ZkKKGX|߮AJ5A08ʝ7\ 3f4j|9#,RFXp4Zz'֬7ƛO/!,/U[ZdCCEewP.d:CqqeOC'}JJ>}m~]$3}f*AyI)MJAkɫC:M52ď9=Ft]6LA8L9F\SUjr ˄B{A`]:#WJW}`IZv')dQ$Ѯ$@ZFv^]%)E]]<⊋WWW X]-%fur :j9)%-uŖPWSW~zlUXʽQWI\E]]WWI)ݩƊ=RWIW'ˉ]]WWIJޡ(f=RWI`IF]%q]뫫$%ŝzaI>]z틺Jr*I):u&XLfuW[VFj٭ Ӱ~d{GugϚm?zBsks$:w sbc+lʗ`KH3h<4 ) |g0>\RP]6,B?~`XȚ4(=ߍ~Aì2K̭F՟<03.WM<yܤ5^Un?KfT0?f˕b]¯|~Ufgâ3IE"3[c81A |dE^QQ@u,"I1*_sCSc.~L*8=&/E RrHd 3âҌ;$XD4$/G# ᖝŀJJ6zd;bZs$ѓ?Ţ.:4cLF  DѾQ cQ %D[#|u"uB2NVba1M n";VRqZ{ωHO1% H-A=SNǛ$78ٷ=a{ l'*%RX$ݘϗx}y~qWn tS3{?7*] C%DmKo馮R `1iCHiMzUV0G˛˻E/W5*A[d]9AV!3UnsE.1KYa1KGgco*,TWEdgߞ~ݷ~ uvӳ߽7聻+r<"]K%Gk>qE[MC{b ͷ:A5>>+¥/Cc=Х`ߏmoc2Ͱ05_:K7avi/#n*|dءWBGRK.7f>ܫ?m (D(\!([JL.0"~ uu7hb#ƠhPcSIXTFf)wJ*oD&W8)ʠ|#rdV=)|mL3$W_lEB^) 1D*%#G#rh!X'0uBmeV/Bh)HCM>"Kgΰ97o4ӑ[77, s\ۻXTVŠas)ϙB.\c #$:tS sb;Jw"pЈ L1P6& a-5G`zi qP!jXb-Zc%xsL(QmGsvtdXDrGk.k%%RHDc5q^ƈ KIh1mo/),ȶםwW]^6Ŧ0'ɀgG1_bUMBG%/19 Tkb$y%7tK݇BP0{J%ęhDd?̅nA# |‘݂!rUX-zg՘IDk1hͭek44.u_XE{@2=3H{edTG_fp~ ǡ7|ޅ< DQg2B*ܼ]QʲK{\(ڰBaUߚWjRC0^9 Ρtt^^\=tgoB2/Ě{JP2Fkjuﳚ'_ 2ڠ橒à_Pb;7^lu4Y>Bބ2RNOhS mRf\.b~^iDѧ[_o㟚Xx~{mz!L{]εm"kkDΔF w}3f+ɷQDb+>{D"p'BĈH֞HKgÄqG#oׅ@ԧqʱdY+냉KM-a$EV D[fשּӐqq|5NW:L3F>^i]>ùYj) Ii6:aX2A00N=傄GuVXQWyBu}m~]aIy>I g6PFʝ"5u 7F%W oVV&ma;-=hsa5bV- tUM{5X;fJ̔jNU3D d:ݳ^=Yط ċ&$Sfu' Tɨ"%h+$ve(:.VN8N7qb\,JdV;i͏KinTn1]X_ wl#!X!cy),NDcȠ(!qbI*D6Xq: !N:HJ̘WcrA wH+ΐ:VRzNQDEP]VWPe/zHYlYPjS)Mo0LF-#+?y~wK$\{QtIb+߯z8(Y|lZCyYSz )jid,*+L8@Z!2J!$,DgRdt^=̐<< OP\b珶 8<3rmN@R=BLyhC[@,1.WqvjFfw?j(EFRj[v~; YI_E>yp7(UJp!y\~D)y4ɳt8 Ab0A"aQ?ƻ˞Q,`UXJut![_Fߏg75djV~Ѻl|.0D;mޙ_F-f79fAlyp=Sƛ.ݽf!ؖeBGu0Q`{Ѽ2'L]I:XLOa>jǞ=1s-b\DU[g6kŧGЫT_e%{yl[..~:WtS7mZvL# -'Wv%l%Ĕ=L?8-1@Dž`lyrf']tDLb/~6#K,.ml>.{ժ?VмpCI3MێrG4Vm%(|[hbwٚwO3Nq]Hev>LWu !‐Nj<l6T#/ҷ<~n]v Xژ).s2wr+dh8^v``lnfG H82 dڣ<%\O/i$u5 LNwy0&&˂5[IWWxc5#0uS+*=.;BKP&mƄEi%N1 eu糞Eٗ!TiVW<ǯS A}H!.*#rPx 4DO%v8Ep?E!7@[kQv5xf;&7}݁rXgza+LouO2)3/ӂ=l>]t _I)aBFiԍ"6BE+R#%E'E^3vdӋ$P`Tb}UC+8Zxc0{sW" y7"PA( 5RJߔ `Mqg -tA}Q2]'_Nd#cH<0qYd_D\1IMZkLC:%װ XWg ^]M{W?qe]b;hZϤiYK ZVف 
SQR@|׎[#lвлsGX/QiL\1X'J;ȤDό֤J&ae]?<~=d^fuĬʁƐM 1[)m'wt*;}*3>&[nnjdR̂ sC!$ߚ_19|z{=\%]ۜ{A<9ocx#޷OjckfK^)S)rX03H f=j׈8H 'AN`p@BiURq{Vb\PmE] mk@62V#g;2Uaa5 ue,=.;j~S45m2#kcg0 ^]M>^]~m/؄4JaBn=CaB''pPK2 ͭ AȬB*U s"t!c`{!EMil U `+ LjsDfq)32+#v5r#㊉Ԯf@Ai$*y+AHlPŝ\HRe-Uh\L!WdE,aML, GYGȨN2V#g;F2c_싈2"Dܸ-LYpK,-A@i:"@ VamMu5Dp1cI+!eJ3>T2ɐ6\dI@J\vr-r#NjD.Ζ%%⢫n7V7W܃yCMveI,Xa"HyʚK\<.qǾx#@ؖrIPx'hw9D?>Osя.PFG#q35  )*h<V5NGP,.=agO!92&V6V34AsMBS`24ǡ1:)m2:"vu3yBlf~ݥb} luvcTYq3RuXo4JKYDE>t *ys!DpltZ!t0Ir`P]Գ^O'y^qd.U͠|ڦXX|0M}Q{5Ks %X2JT cAK%E!v`%{TJ*Sla/S0:r!ˆ+"Dm(<2 Jк$QqVxA,C,ǔcNϜ2ZBS"Yɯnf%fH]%9o(\A7fiH4MhknIswީz<}gɌedH U>GmA"QR[Aj'd$.RYDI (O $4-ۑm 2HJ$Ei%c(s\@%MTXCR@ cѩc&)lW84K]. %:LH u>p1Җ$tf "E$Ezʽefv;1rK9{tDNP#QgЌN%$IFYf\PYAz$cJ(vO5.k_;uVqnp gr !sv $ߕ|1 eC3qWԯerë߷rW?ёpLSqI>L,=:?M ?~wJeè+Ҝ䢥̔[ھ$KE9w߷heW;hҁH}S]ЙE($MeL:w_䒖u6iqm-;Z<;sѷ|C-ώtf:-^0Gi۰y Jk]oFč[Z]O'.OGoyWR^R®!$2j蚲8}J yv-8|עgES}ٵؓbׂ͒{3JJ N̕ZI! +:<څ.GdSXH$GRZA026262 ܔj!2D_Kƀ2rҺr~RhDa!F1!A Fh5a#WZX4d7޴5 {ʩw O3>3#k8{h*QF)T3^_$= `Gs)jziQ=t\/ԱZОBrBCBWPh%_^>8ɧ8k|aɛq&jVt8QLsO_W F/5UZ[u-$R9( y|oqG.Gg#ޣI~W PoճQPy=:R:E*Q?~FYȬRF!0憬7PěrEx.Yy*c{`꜔țUnT`xB\Ί+wVZ~]v 5|.3kcΗyW',wLl"Xv2&vK.]U&vrU t+"ٻ6nlW tllM/ l|Ȓ+iE=gXƖa4):#8<4CWR JvBJvtЕܱ Ls{pjؐCU3  }jrR6Еl@W6=q^]!`+u)th-m;]JNhGWGHW WdAtaմB>d]!]qAR Ѳ\<ЕZ]!'+kI)thcb?劃ЕJ|=•Bj#+%$ BBADm Qҕʮ6@e3J;1j7@mPoɟb l4 4UFFUM!M)`.hEZZ ]ZY QnEJ#)b+m)thU4]}>tvlzuBUfCG&^bh(ʶpt:oSO +,` b ZvҪ(BFCWCWVSvB+Gttu@HφMOBF8fpՁkzaJ2 Jwtuߦ֘4lʡ+km)th.nR8<5 b bբt(JK,0eBխWW򎮎$R ++i)thm;]!J1ҕB2R]!`]3paZH QU+DmGWGHW~# +L%/VЕXvB:JR )]!\J+D{ҕ֬6֍3θ8!\Pcԛ++EhRMd174iM,f#ЕS)#xD~\]욳K YݥJ\Ԩe]tNbVbe)S ֒O- ʶM-<ԂzYc*@g Z3]]F<%\Oxe't̂q0|/4/}_z6uP'WXdMV9^ѕWW:&sDriig~|BS/ g,4Jϰ,?aksiR':wC S?"a~]-r\Kj0o_meQZϾjM)6 ѐ='y(b $LPVqAF!\_,}kIbz: {qB*os(4LjM]J1ЀFpolg<22ʔ-bJm e~;c[u6]~q0)d%0՟ัvr]fիO?fߏW{,{.nzH!laڛX| x5*g7dݝ{7>ޞnh :,_3(FfǓs.)Hˆ ݋Q)f]p; PÃ* &o$2/Lfk֒ " Nd2\TCH:BR\bB;{`+5폵AM4SyNǣUCw]?q1JhIsdkBGR B(%0ƂTqvHfP`܂BIYb^ .IBQi!9hl(ixkIB^ŦbbX UżL^LQq=,9>/ۛ\ٿOU2E%7r~ n]$qz34~f41r|:<'3Ld[2U`7ˬ?} 4||~_̼yo^~z LSNwmz@}}վEs#v(Z}zw(W}VyCt5B?,qχ$;w zi/G}1}q@r؋l@0VT*Ηp^k/^Jso:@qf;@fwFu!V4Ab֏_l.{:fT 򍽹IwKdٳœyZN/.OWUTQ)žB'aT҇J2_b0 np>Lo~e^{ٿkU?ުI-ߨVK?_ˇB_Gb."]Ig1o;I93׉1fkcX=YP>RLFåWi%[ȷ[q}Q!3 ɉƜmRp-eLE%tyI'c#s2g)\Wjcs[`NUQ-2W!F  ,QkΤ3%s& B<"Yv,nq8+ޢhvTX1Pjc~Xˆ%9VoOi&G/Mscmsf7$c2;,WT&,IZ}J>(zL)ŮgkS; j;V:2x*gMŮy9ο9coNNukt^6^È+k!Rb:/q6 Mn+o&VMs%\QNePfڂp 898xB'2SMFUE_KIn]$KqfAU =2^_&<[# ZcŹK-I+<*9}7A@-5SF f=NbFbGg&QA^ 29WII%Z+3R%#G*/y'///w 弌EXpفt?,'- lA>])̠r7Ԧ[$2LyeYLY^ Ύ3np:Poذ=`د|8xaDMVKdj >!a70 ޹KE|#({֣i't B»TGV1wfr0oȨ6@ R,u'wdLsY3}mB RVsk=mߝr^Iƣіnt]]l~4̻:>o;2}cQqnzNi.H뎫 /4OGv|袯zno~ ߜY3z?\˅vrFkV},1@[RYT%S gbvxR{G葼{aٿ (0J*ɠR֖8" Rk#4Jup&,0̨hȥb)}Dml܆. p/QK5?ya>μ|7sO]OҮ{Ԟ6x>Kt|WP5> =؁"΁}-ϩ/zӥB~O{WzYz%,mvoP.)geN¸`(4qo t K3$hpf7gj{F_iN3l{-ő<3~$wJܲ_Rꪧ#=>_Z8NNc>M =h8M'K,iMa2m軷W߽~>Ox ߝqWFw\=z;~+w OפGO+=g}b͍U,+;rJ_8e)[Du)wc-$lAe#RIy0Ѣ /𫭿[i{Dƃ=0xL%70p탉"XV:ٔ Y堍GŸ-vQ:LJ[v'~}vAln5u;sc,FCc@&TBh3J drNf1#"\ #0<0 YDלY˕GnKmd6I$G|71(R7zlN}#܏~Z%kJu+6KTr2h]/&VQy'ο}?.RVyI\g?-<$2.yT:O[JfXz>lٴI!:>{_3r70vY0xڒhtgy}\*wr-{uVͤp^Okϧۄ&?_z6;= ׾$TbJ{5HW g?-'S 7bڮ/&~g_5Il}a n[OV-'"MNmb9;;+5;_Vj\BZ^j3 و}rm7& $(Pyzm&^w3y.X< 1|hf9O[L'zɫ]ۜǻ Ms8?H|@n_ ꎌ+#9ab2f`dYռV^]ʭ=8|~p"`+&ڊ:l *˛5q;XE7MzµJŖd[|ܩV!jD2Hͮ羽#.;BKP&L% 9ZP:4n /,C~{B*l"Nq C pU!HD4ΦTb oV! |*)1-=] mW7Bl;Hښ(kR.kn.=k\-=zKg@[4KS" [$sih*kœayӹ+ZZZlX\W7ݡۯ[CcUmzENB|>]t0ϓ.Rq>o&YE~ |~6}"\Ozsߏxe:Ckd_tG6WcR 6Jn(3+RF=q8p88ZFl_\ hIeyʵAƬMΑefVkN"!x5wLq GD\pu]RG#*,MTh@ G^MmZ\>H ˏg+s~z}2ˎkn`?~||steČ״X7x4'a8 Mi| B Vz,dH)}S|}EZR# ٷ+{s"]~ٓV]ρa1$ q8,}JЧa.Td&5&ahfڌѽ^[hS޳؎YZ {ʢcA\ .FF!he}\ppc'JcD:$hTIFE&-&zf&c T2+ Ð}HMȩ#W6|"Th&N!lSSY$4r u+b! 
Ng˧PJRVHMe.TVjlV$X4s2KӼ|CI6O{t98 AI_s/9lt+;H{pVE%2b|sd5"3N 0eCI"砹\ BZUqD{JS3\̒ vYMp#s0HFjlFzJ5,63vB] {/ ,\x _;ٖŭ HoXbx <=}>=ymFlB0!EɀLH%HV dV!I\몈-]yH(^HQ`ST" 6d [Cʌʈ]M͈gx<Ԯ5;vEmS͈ڣnےQ:(H!ыBXws!6{ ) Z!b1x",Cȋ2YšX0"  $@,S,be<&f+{kFoІ`ښQQB3Vo\FD.8N% Y#Ʌ@4)DU(=k/g3"~>N=bEsHjdW\tqэ8f {="CvȣN,QxL+SɔtYsՈCjcWEagD%©}խ 9X/(*xAXoԈ`d`q'OR`>UE] z4e`_}}\aooU?t ;4ʃ7si_'ZK`iFM+G<༗ʐ;E>UbBEȘAxr` 2%\ &z ,֘#3>Jmh:vtaȅ.̭FM×Ǜ. X2K#+=Q55^-!}wfbڕv ;;f%^\H 2pE@[4Xm!VPzԍ gYi70$;ނWC,ǔcNϜ2ZASv,8[TĞ/ K@׺y}[ݧs}_~䄼YC 7I ?⦟rsSyGY2c 9 ܫ(}4V) IVE: iZAj'SB(%I[dbHÍ',Ψ&Ζ]dԔEީ@*tuk6b޳i +t#Hha,:u4MzJYՌuj|.c-yI_K5#O9lF$g|j~ڝψ\~8w# `߯vm|8OE[ᵟ/] Ѹjߞ>쌎_>ȍ넒샳9.dl~C0k(szMξP'Wr=iSY2ݲԪs-}\pKRlTͅ\S֪C\wdސb^Eݹ̝cVn;d+*J9wv Is5ײ?g#zdaIw6|}|yѷQN[8/yönORYJ|0*~JQ-WWǯ[=7_R{óᎡRԢTjOzʰ_]0bjuf)t5в;v(Czty +AEWUKӱ@,tk;GRje>v(# ]@6 ؛Հ˼hѯ]%YztBqI *-hju~)t5(zt壡/:+]2a1t5:h9;] ]@bMnv_n4}}(hszsqcic“ vEn̆ꏄV?yٿǭ^"ژ18QOow҂hzieZV(ٽD E;}iʌt5YtKĞOW5o4=l5"R^}5GDp8L#Q*5=vz+Qj1t5^ ])rDjBW/4JM bj f)tأWWBW/ 藤.\u5~P*BW/,ij~1t5ЕUfBW/rS :RjUj4$tʫ`엣\^h;vJKv"9zIt圡2Еc+<0t]}tφ{wtrai#GOnb0CNYd]9Ň3y}K>?7n/kuW*|shy:]_\ cß]FӮ_b%?'7GK>ˏ߮͐фv{g_m2(]AK{A1#3X2|9h s9 AYUSz]SIs :%AuPi .BϛБuJ` IW=$B+uT2: #'4X(`QBgąA9AJ$rAV-2.e(ؐm 32]kc4/%$0A辬$kPɘukx$ۑ< #7.xV% 9ՠ55%w @6T !NEO>}t??;=y'K8BOeL3fXbM=A#%DFTrtmP!/fsP6D".ѣB-C=:H Hј jլYe9%TB;v 䁀:u )+\3X;D[nVҸ@#{t➭C̓ (EȚ=Bi63+I@d ٗ P?AjDP񨈬wkU;aVBV|,[bl[NX-js4 =lC$bThj@eVު;6R[ʍGox/GTl$XHhf-w$a%0\3{ŷ`CFjh|Wqg].N5cڋwsL3@Հ8]!\w$f=)-2աl d4QG]Cdzҽ$I!ehPA)7iB_No5̈=XUk80)QC^";$q5\Qr#bs"ͬY0MA;hXehuYځ4Dm;:T}fQ ƾ8¸n+F֦R+:iH' 9R bX bp)'Zƈiln֡H,pY=RuN` t.!xCP֥ZcLFi5 mlS0s3i6@:Xf=՞KPg&Ub2 ĐR{OkנB]ǒ?j\uakl*!@6(=< (8:j%aв5K[̀| =pemfhJ7a#U2'YkO^3 (TcJ ]\1Vn5  Ҡ8 ]\ *w]. q@C, c&;I4M](IWKl$dip *?!tOW "坭3!Gx\;pXmG vR>ClVM%nٿV&[Oj{wyr`@Ƈlsg/6_+ͯ&B4籭]O<8_Ixxf9vx m;O.Wqw۶@!Oa폊7Zmg*1BJ9t]Α@kџb)6/ (8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@߮(SKr<, v9N0V. ,7.H'Pdw{'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q}N@ 'cJ, h9v'@i8^,N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@' vApsXbOr@b@@kNq'C1p? # _Iy1Y9-5Z mِY|:?ܽҧ"#{Z=jEq73zͻ2AP8%(q@JP8%(q@JP8%(q@JP8mF_H{X|- %H~h,%O2*ɖ;x"Z&v1HC|,]T9UN@PT9UN@PT9UN@PT9UN@PT95PljE>o璷rw An//Zҁ-F'F#N`([s<1^v'Ԯk?՚oq7P[̎cQO g~~qwo֫ߩ߽T ]cpm#j;o5X-w;͏WWo~{Cm!=ah7TP`ʶ*V4ՠyXK)gnoVdWipn;eW'SS1thm{5t[TEӻjt,8l9_PT0ʑSj6 3IVIIݑL'5K3+pbe9 ZlYPhZ`4Z`Z~)G*h~Mǻ FW;4Z)CQWFW+ubtŴ+IJztezsnG^gN50$Z;Ց4ʐ٦c+Stت_vS쩺b`btŸ^Iӆj1tE䌮]1T(*DptrtŸHӢ]WLjrbA"@׈]1uŔj cj**qF]WD ]PWG%HW+Ƶ^gbEWsԕ6`3>`I7̣1(@=ci>*7M#pV5MMQ"Q4 f\/fb1;etez{쒀* MK7K %U EWzf, 81bܩ$:rSBu5c銁щ EWD) ]PWh ]1p+E+ sSjWt5C]Yc @>b+kR>w]1e RWΠ+pRtEVg?"J?z^j|t-(tAWdh!P;SBuŹcc2jܙ1||p{lB2Z=Wig 3Sfx|.APǼyp~J3 ʠBIlI &PRPҶ](:-iDfIX^:3kbB DBd eJW14גt63.:)"ZlbʲAKiv"]Q]V=8 8g'&o|:]:w]e0j4bcWihh=bJEW3ԕ*:#HW xPJ+Xt5C]Y)9TWvĮČu[ EW3ԕQobtŸK"+Pt5C]ʃ ]0('FW bBLcbJ[BsUf =-QV/C'PmP:]_v]C]cys Zƺm^ mm .`[QmM/w5ֺ5 >s{dν{-CI߽ݛKj{맋zs%}@նvk_t w/Q8hYݷֽL6.TS3m@]wo>9a˼+mVEI[ N!06h1 SY|F³yR]1,LÍb:m1enW-z]E ]1pEFYbZb#EWLk1w]1EW3ԕ+%HW 䌮)bZ>vŔڋ^DWX+HWʊIrNk(EW3ԕwepV2 [9+uV6uŔXbWsU>q4fi?&jO( k19. F{g#;/y"jc. 1P$XTŊɇ6D6cPmfz'A:G+xQWMĮ\u<~RBjo׫U]^W'7MW=wo;umk{{!:Oe:tbTjL5mk۽ȳoM7woz"yt.+?<7nׂ[-髻αٗ)%]^]_== \W3Q=ΨM}Kfu[iq.ԏὯpZ/߾a6&6Z \4Dv-:FjVzhm{{SP9u{U}gN//[5Ď|ZEw)ܾnYvDlZ4_ÌJ;17 󏽽.QhW7-ijh?{]\nmܾknw]S֯??{ ߷iq\Xn7pz䢥vT IP_(wozcW\:\ۋ TFLu4CGU?ִzm#ޱMYԡfIMkjGӦAk4j0m\^H=7]0tQuʓq_y`RzJ>=:ˏ^.Ѻn[ҦiTncl1TE}`|BNP4)X_ߵXtʷ]nm}1Zā^RM) Mt؁|t5Y|X? \b 5F[j#h}hVE}M=1x֗>Q:ӑ+kzg 3xFÔ3?5.git?ɽxQ_~y|ǁ ?sǸn__R/Wbݶ7}\܆zO}s^. 
^l?L|K,PЅη۫ I8a1X\l mWը^ KNU~96.>ԫ *ᮤ.{{>EmAXRS茤Sr 4b(fc$z4ec,wU0ۇxUk۞&UWXYP "4`m;D;ͪm "Uת[*˪#YfjSi 4Vz=6^AݢXkHަ XHEb6-|C^Һv ຺9:kv].vOA\m twUoL[+}7tc * qumw'I %ggѺwT-ў u]m}hɧ*H[*1k[D_uHF_=r6읭;u˞=U]w14JiG;W4ÿmES[5t_\w{̭GQϻb_ے|:}% s^F\x#LQ䁹+ioSmn.9vDd볡#c?Gc~?lwLJoC?|h|qs6o޵q$e/I7R l^acKC-ۇW=CR!)jHQ,s=UտztUCKPū1ٛvi`|.^PX6.r-o]5ZElgwe̸kK|0kyo-Vw:0tz};= a-ʨ wx}*. \<ǃ9L(k6D8Fk$QCǁT[{T$۸tsy*F"XU2cx)w$O!jAm%@r>vk}`Ѩ hf c@_@$P!(xh 2oht":5q))\-΋$)N$J"\@=Ƙ2$ֱql6ݖBGd;z[_i틗c̫zmqϻAu84 3mCFB<.$Vʮ*ˍކQDN~yo=?n3Dp'kh5Q'~0/Z}5)ΐhgŝݚPy1Aw^鳕x6y<Jd-0ȱsrIw죚_CG5tD*yCjZٔO shW6RL2ыW@='$ʱN 9Њ+k-.q7Gy;}*8sRjBD85L~Iv[[MY m^z[1fY7$޻E*?Y?@k<1|i-0$>1(-%+*(!bvl^VlOҽpE9E-FC!j1,OpOdZ; SD ].I#BSJ -fC=p\_F^%'YoNE|+Dn]CGj2sℭO&3QHT2slǞgHi[mbO97ہ6,,j\)e$@$XX `3JFG\ Iakǯ wƉy9{zR*%0 Gw1v1@ɄJyh %ՉKugWڳSu6- 8_\MF|-9ϪAs]}|C>ٮx8oH_xeouYgw=}⪵.֓\;0:1]y޶;3>iߐJ~[غ~ܺlZotfqbׯf!,e9]wzi󩲲E -h8\7 #. ni-~BosҜi$[œP.99ю?ޜ؅9oG2%ZXU3nuLWR uU :vT)S'bw~owKڻfJ(e+AAҖ8"J%rexLHkWj{ciS:ɓ}np[&,$̨hSR1xjcLH6A˝-Q״U7ߕct}}@#e?f:'tYgio讖XNT90s+cbހ״LB)ѥWɗ!&S( sD>Xy\I=8Eb4$I`8NTot=& !"}<Υi_0Ug-۲|=:GIF<149T EIJ1YDpnʟG1@\ٹul2Hޢ0%OAhô:iC)k492(.3h6kG1cSVrgPM3[%V36|H1JISdZZ$B GP"P0QȿzlUUMEI"Ph]$!iSTZH}T"bS23T;f(Ҥ Up`!fc[;{r1n[u|n&9~c1%bn$dOεbɴUܞ??\^.~TRE 'aV҈".yk^L+,=(3Mvx̿idF/nVIr<8C+b"wVqJ"6 XOЅ_Ww|nvM $a^S_&;lXߖ[.pRzZjcIv݊m/B"*$BeeAr⽲1% RTtQJWΣJmV6R̎h8@*|fDJ)pQJ"(=jڦ\[ $v#P wi, N|Acz k}3XO~;s8zGC[/CV>q}=A)uJ)&!Ri1D+R SS3?nx P#&F*v4(L [i@ 'qc( DgBM܃  ڨ<QJOf1Nl:WW2Po\_4] >ERxrLT yأGBG+;GG,S:2|)%aLB:AŒ1% M$qОm#϶utuwZ}Ez,K! 7A*H(pފB2p֋(!" Shc6`R#1Z2X`UqqQ8!('!bg"6r2mxoPDõZ|sҧQΦ-xPgouofíeszbzHx͚gxx3c^5MH/D B,SgY@%PJ-LL2Lfb[] %]jwg6{7ʱ;P"_ȀeNj QG!"tJTģP%Fjz2m%!zp%ןΡ9 "RC#pbxgBڡxREe~)߶\U2v]_o~eMgdT\X'9reyz$R`.ۢErzP ʕ [dʕt O\I-^$rN_S5?TJHJBh^ u%AxJ\Խ/9%VVo満~M95}\$P郊,h47:TΒ48J )H )"Yu^C|Ib7_Ҍ?.~}z1dn|$swJ1μx$y  T1e}gLJa/!{ G Dk*D44RWKœA,yb |p O*IRGKs^(o1"p]R>2e\b}FtsxN^<Ţ:IO{ϩevBy 9 B4F G؋VK/O(lC0@d&cm`~BPģ 4_`& 1^tVG "٭^,9k]B\r~@tK@g'+!:/z5iՅRQFǙl|) }ݘ4M&E3Bop<q_ U͘m~b%ffEh]-guh8l<11Ά[4xڿnXluxIaQ*5|d8&짼5oSdWzdouoNٰʚc(#5Fj^P/qIDpeKg@ęH:x̴5[e&PLb\V&p>X2 8m.{|AQsk2:zbХQ9Ť Ayb=z+zy}q zqS Nvf څǪ.ut]L89$r=+ܰ H_#σ&jا? JA| =D`rVȠ>qS~vY Y `(H%KJq D"}RSj/Y{ 7l)>:G̼.˶YZp:Kh%B!` *KߥZ8魬%N YN@)@׆GN) i%\*8Dc DTI1t6F%3d2s@@Q t!Mh)*+&Ξݡ(`u;`z{edYC'BHF!ylh1 dhr1rE1GA40QG ǿ%h>  Nkb|٭ݬnfi;GDۮd'i6^n&(iKZ*)ܓ Y$STg͗[{CNBGZw]шqM:+U"NZKs'I%KKj#H0ƵEQ#2&$NBO  6:EJaTid,&nd,gb;P,PXxjU&[Rvr۹oS^f@eGoFs%LEFf'cj=I!Q)"67yrg#9YMGS ` f1nq1t #v1qv#Ú.;EmUUڽ56e -P5ƭcd2cQ8JRT|X`&(,zg$!eHzўGX"EfBe Iѩږξ+&nOVg2p͋ɇx3xn\/\}8+G?'?7 bG0'0 /9f?NZvݰ,8NJ.2vLeWjhԥg9pOU>?̾&UEA6C~.Sxylb,jY^-k]jlX2UjsQ׵3h&jRjyu0~gq3? Yx=H{~UŮ^˗ p7n'7;#lКj ;#kz6pv.p{:\e)3C *UJI^=\FHgW(l*ȹUVˮUK+a8я&cgun&Ll%^յׯ辧0||F,AഏVRR嬏ʤĪ!Xd^]pcK;]g>uܭo~j`y:PW#}Df[MgPYpyrR*$:`̆BFh.c c%hcOxjNGEhpgj.q ɫV7*~{kw2^xuC:k|0WR~GHI*v6GX(!w 8$zR=+G|7)txZ̦T:;W%Y%8 h)*o 2V!_A[8Ijed+p#=[O>9d-0{SDҚTBj4{ʛĬs*Yy|. AFN{|ѹu+knKt-@uڽ'\}~v'7.e.K9.:E R<$`IOn8H4&^ޚPb?$g;NOGT{Wn3h{\Ea SMFV턗>$5HѥJ+*B*ǽ3aKSt5]Nd}>34c8)$E$L$ZJ Re9=Iu>E;^@}$!z\NGPeAc`tR=ht25PxὫRŲ$C&Bs!n{y7柸_w;>C 3'zN-Fܥkk%HhWsAn e QsiԚ)c$pQ/5˨BdPÕC$*&Ξcהh@TP@2$j6J6 Α(mDrX4@2IKS_ۡAMfc$Gb6SW[t̡c~D&Eg<Qmȝ -)֫zcT۱1u,丐dC|=dB M)UhQ(Eױ,̷Q+׵Ӈ:L"G&k+p3 !Nq €.xJT )^Q.&!|4Mea,J|F57 9CV(O zr`JOZr ec _uᘞ,&Jas\$eD0UF !W"P%N'[e̿W ^5lѕ6ȁ 5h+{+ 9.-<+0Flleq<,|,%gl)tB9Uec a+!Os"6ADk$gXx.7_XעvF .xP!M$C(O,p )XX1q M*m\ߺָo]%égBtmt ~"hل6|v9[?08ܑ2Y'DLȼ.QR<}doк<Җnm_؄{j>G>dw2k ǚf|ǘ-f|61)CLZjo|fK1K)j!]\ٚ}w+%Zwh.Fbfc=.Zưov+f)ZWױy/U”Io^?3ʫȎcâ@0g0:R*90P& OĜ&/{lvM6}?wH4Mc\v,2 l20%Gu~,nж<8Xq(Xq;Eh%Q'9biH*(x`^H#IEQ.vM8ljRnz|г,= t-HIg14Y4 H"x1D-P=3x")r0). a(JFL TXoNNE(Qr* +m,Il›.OyP)0K[!gB= y? 
QuMB "I>D$M@ Tw)n0h< 0J !|3wI#8 I6Dh&f_XpHmUg^,N~^Z\=^(EU$G'|@ySd:bєxub'&8N^ BzFbD$a0a^EdڬL\Erh8.og`d5<ƣtvrtshcVTSV|o:t*'~m;M˕Y_~|}~>ۣG~2}꯯^-J_p=$ td~$|׆W?chYjho94CK(݄a\EJ>rϸG1scaWP#@~x$|7I͉r>^~G= *,oAQѼ]7R UF8iѥTIeNwXSڽ'q3l}lcVc/Vݥqc{wj#,{k ÃW5&D4~a͟Owi7.Twi_;nC%MoGqmu1R% !Hrm.CD~U^ö]kd>ʙl1gaa,8{Z)V_$l0Ŧ]ծڜ{Va+3/ܶ"'f}rec8qN2j`2Xg)AIФ¶ZTl>Xnu#=bϓb.EB6`6Rvh(] W:6*xowAE|_`}˃Ca]}.z V >O$'_.r?E 55"B"j] AvQIBCT!N;ׅT4W]$O~GO5 Mx':elXhr#5)"ur)D{ZJ01Ax'-GqfiW'# iyK2,C.֏zbxd%|ޝb.`ۭ tDHRm胡E9㟮8 GZk"EkǙ%%Lh hGO e0HD£6YHpXPRj"BXC#w&QS ZB%pc)rn&Gk?ơ.V9XޝIS!opOq|QUw$7コ-}6NN'wL-s{v/֐hc ˖;u{3I2 )Cf^o\/Q/⊥pa*"vӶ~|x~zOj" ԳٚlEj%^5SIP ٪P0ic< "9() Xi?dq+ 8Xs<{T͢A`GC+RY, JoIqNě} hAI^couZ]G mD Fva{Y/PYF׻ |O3W2RHѹzP[E(%(KӐb5SR{\pHY`TŭJՅeɺ e/ J6nVRZLRěb>N'Zi:~vɪFezIܯ[5h5][bwZGFtE<;_|5@u9Y!&{1^uEW (PL<Ԧiu'(Ӽg'Z;u}I{x̝\jZ/;dԹSsJVK"`2-?7kW/oPYr:׽\6b5p_ceY漥t<&;TysW7l𙝜$QV0n-bo#QkxI~~I5!C7,e%ƣ0-bz~woMW#u;+iAz= ׽>M3;?q:=+R?)QbαyKͫ-.ca_M}m4uEg&fILu-ugFu{l26K8vfk$eFe6 Z+[:6n7y&ztMe tqo){fg"Jd '5Ԭk#o<Æe[-4MFha2഑4q&x̕Fd-ӁZE@01uYyCkų嵨IYJxϭRFJQ#tI€ 3p F\zPzE^@*U7v,T֧d+ ^Ws.=SC)'x~4$rxD)Fzx4ڔ*V֞yUU_Q8~HeoYw]\5䐚C!^Fm".(lru[{͌md.G8L,αP3&J@` { { .('܋(4ӊ2@\ Q8j"Rh2(72^ՎFoƱ1!tFW>MFZ|YuY>e1s<]۵vRv@ZOa|k@Y!$mX2%BׂsAS%y` ( MiD$Q`#e(2PSj0`h.pe\28"eL*F49c ]9C@R D+5"1Y9wLUebǺ{T2|uBRMQx<2[N("qKIbhXZUlHhDFtT$#$3)D<9 C89'tRD‚Z5 aies;Uz?N1>y~;tԧJsnT!ǂA$AR- lB%UZ3#vXӅ8cW]u! c WQa{?ۋM|2_6x<}7=kبiRe* j@81\8TqOǜ$ P`Ν=1βRyJh+fJ6 ,jtļ_ m M 4kjl+I=nF-m@{CCKt!"PNܖ8~Nnktlڢejw vՈ@Q(k$Kש&XN6 ai/ +s2w>EH[CEg 4d`h5> 'H \0G:HcZYY%`Dlm|leD"vXk"Ld@@h"0fn`KɌHDv>#2D0BB2Ѝg($^2XobAl$(@ I̥m/%zD|-hQc:[%jU.ΒŎD^DPX$R1 Rz1Ň]FǶx[C> Uk8c7ʌ"=n~|GR<֭;t_ C3 o\&\ e/_0dt 9-'FRq/a.R۹Kww0wKӝ0.(P&1Hn8QDN耘[\bl,Ӡh*ḻ9vܦ>(%v]+`4׃,p pyXԌۺU-OpVzo  \%iuJRλwppEf]OH"B1j=-\Q\(~ +6lWt=F\iGp@sqwD \%iuJR 7W =$0Jr/px*IpW \QL'vVjo OwuGiuJRR7W f{W 0ho e \%iγ$%\}p[ORl_{f8Jnf,Go:G֎#/OcJ=*syb$e%7ENwT@9pp~S g]LkrCO2=\@E,*^cɡ0X}!n?ՍZ6 #00([#:G :z!' 
aS!WC  iy ~}JArfL8nm1&dj:p\1r=L׍PDWDydoI螷Yei=†(dJ,~ql;~H8&EA]IQf4Ne.&j{&S4NByE8jQw`Xpm 4Io#Sgs"֋H;$gv d}|FX:j>#zCη gh)GiB8(̼gG(eqPFDC6r^@ymBԣoC$T,ɨmȓ6bGXЮTA&8 MRYf/J$vIKQ^ {Jv*C*c*=Gzڡ }cr `cÇ1-Fs-(B?wTNFF4]o2~ MypSk5mJ"}f 75Q_\ekZ[.ƃ}I5>pq56J`;]}|35Sj@n$mKwn Bȯ 4dvbRz;ѥR撎ʗFE%(̠Zn61蕩G@T:Zqh5Z2"z`l\!Vb>B &i+sF)bvŠvh-0kE`sk](+Y֊%ז0۩ ʪ#/3GrvvhJ\_duǾ|'$UӯeUo!ʯ}0Wۜ-a>a*YZ>Vlc:qίfXyD/lPWRR#e=@j]\h/>ܛ4vK{L;wr L*fK%--MM*ъf [.AQ]8%C1x0W6RR ʼnNr 1.Wnœ}^[=^/,\6c?nvEx}>oތW{H{L[ði}};DJk!>a+r6im,K5h_}PG$N|* 0i/r| ċwh?ݕ%Mu5|:i?^JV^}Ϳػ7n%Wpkx]{6p?%0lVL$/{p[iIݣZ օ"*EH\Q*Xla[daŹ_kѩ2k|-n)W0If4HW]Y)Wӯ [&^'#s"t㹅?㿿ބ1{rT < =?Ti ޕy*.T: %(ZD 8g)\Ps;xX4uQglxQC.wY)g9?Ÿ>i?ff@juߪ^+W-DrRUVїjIr|)_lצb{?aάMkq*Xe.ϐkFZV]bw*_K4p&qK_Oεl KlslhK`u &g\nl^BŨkM[4>=$w4ϬR!Ds-u??(f=k˨nZB/32Cdo3.Y!`+0ՙٛ|[donK~ǙVi/Q.Ed&69 _ 4N>QqíC&a(R`Wͥf6ǻK%:׌_"92͙0j'mtnAL4K"fu)E8!ant=2(3D.'Zpƀ)c*oIޖꪮk)N캮y/ϥR0Ɖ܍kiMLOMPƶgqWp'n~(]YX޸/cޙgn*ZJvYםV>KPY.hZyw՗nVTŕ)F:X.~.GTtHڮ G˹m>\,ƥGb?a/p!őS{2Yqad}AGGeɖs!.J)^p|}cɭaqBg5yO7x^pI\jdN b;jJR9NaX-:)ISքKfJjЩǥnn-^?^FuϧZIuu2Ḙ+cnyY2YI2HnMK<B1Jgu \Y,&-&֡V ѭUҡ2n;n4H$( K*S.3e8Bq +wOZ W Eq{סSu^JYAՋgQٜ2naZ%\Z*b (G66( )1kLvݶN۫J6.8F7BDaLX >nb[<^6;H`zXFk gr 3,Dg^Blu^7]#Y-7BzS7BFHMBG)/(f G][i &a7!l/f"u" ʳNňsI ˑH0#yX>8xpw|ђ/hIVNAwQC:fWי3ez>:OFirz[\!_gN(aO۩ښLԺ()۾B!uϝKy vj *V" >OW"VP^-(o"IAjfz]Pfn-z( J`ihρ`fC{x7a弱xA[3Vz4x>jAtI烕_>~YEdžW L~`ikRn-N٤IY0WDGXǑ hnb|%i*Jo'Hu;ջG6ѰP0c[xOy`[Ɉa!"$bRn&blkOXJ7qeœ,k<#j]aUzC+aE֩1C(]J[=u'Ij,HWRS*"i={v{IN{ėRbk.I̒R,:Kz$*lQcc*G#)k$}tA 5ie:=iA]tz_^w"ct!)K͛>e'\Rz0F%iK@ }{.߀:>pހB}+)~q]`Ô[1iKT"'Dos 5swWETKrDk%l];{1*d|n53>MZg{T`z4ixHVI(sl%[@郻f#^nPMq'mk #yNBky)qúZx6vipD1j`%qsH6~%j^ 7\Eß5/0k|XpsU˗]6KqBvK'8!8P8tDJN+H% #,9Ts{ڪy|rެ+=\F% Ԁ>B^ր&U/!5ȻNa.hk1P Ҟj>^HcFFiH}XrήJyɊ*twУçty_ =4N>ŭPen=+9)>mi>Áӎrle'\Xo+oY)-alք;Q֝1عAq.Dܷ yӴsFW,D MMr>. WVV:Mӣ IJ+,'[&Gj0?[` 5R&8 tSl Lkcq_:q0k~7cyW#9SJ߇+=[V| u{wc<IFucr9c,%7y3$j˜W"eVٷ_~8R aqEUT`u83 Ts:Ī T+V5bfWq|*!%Qa1D:s.ZL̔a*$gSe>w^zcj$L^/ !r~'}pW-8#5(DHruYa]MQD'sgHND?}q͔ӝt~kE+[>ob~3|9- !͒YDJ8!za_;hY@/ hQdjLw)r&QH!cLU .1 c(s06xOpx2G`g6Hs#I1v[]3`HlD0&ht^HT ;3{2w$-B% Zy#Py@K-JL(6G#!51 DZB%ք{A]}U2Nz*V+v^ZbSkH" b,0g&Ücj]-)3E\iDd愞cx[!5H$GH[!XΈp O4 /A[lg {N;i;=[><ߘ )qw8#@(.LFXIFL*PZ8J~axW nuVyyH_"!z L'7?>leIS̚twY=: Km`džZ9tP0)l'D-!dze )DL3ZOK0H"JG^$;f'IB f̐I5Qk3`A.o4R(a 3rGۊH ^~Q#F!٘b_~~[5y*ƿT5d4s=H ғ #R܅F*_f-)ȲB_RqPhF)8׻ 4`' RMMP)1/pAa)rM"5S3Lא5MjUS=eS aᒮ|=SLUSEnBoL0Y9M :4`9U9%58MHJHe[Û۩$`Q|U6>ڢ~E.҄e&F\;" uJi&IZAg(; Sr]O-8O QJ¼Nl |ux0^ R}ޯ+-X7[fw ѷs%'d(FVLS̖_B[]q00sb l:`<o\Ë=ks7/Hx?TڲU\mbaD">W߯䌈̐ehCxhp*׽I(k4nwtGp?iuQh^ીp{ԚJPVowgu2?HR1zfE@)ì" 8Nϝl>yw*©kD`gW/߬N`hx&O%OP_\ ~}p9_$V;{:wHX%ˆ0QB2P +I21v:>b@͇hny_@T2k!2sώ[=(ZKDKKO [Dz;X`ږ>{VMЇlJ׳GU" rT7G'U%#] 5Sƹ6-S'kParT =EZa*..ڣBt ݶ]j$AXY{BUY{j(JIz( Ez"r^4L7,gr&shINVkko%'\&J䶾.T?@t@cYMQg&~a~/s&ih&$ttttBL0224p /2^zKRg4igG4kXχ]/-ެSA{K݉;Itl fMfsv&w.7ӳ'!qMz˻bthLyv' jĬB!.^g&\T83PoUB&Ad[rZw Nn%cg[%JpHgWGuEj$Gqd*f*s͚t튒]Oi׮j(P]M #( h(|4NBB$ :%^>tUSp8;_U!g27ܛQ4^795#T&s Itd(Q%t$I%X;"%Pd{.vbf*]hQt5SD/?*EѪdA0%FMRS:*W̾YR:th,! xb-)->&6wnnnn{ySGKY!)exb2J(v^MK-ą&cFRC@w_wZ{u'Ȇ8tZ ݬ帋7E覨>v5AFm|r_Pfaaӄ- P>uۺD(ڜm[h*Xui|vh!c%Bya JK% [S̱t;ʰs7b)t7!F-\C4Q&lG| {XQTT4>b2WQӲ _5!ZePcyoW"x E#g xS 56+0m}sJ0* m sl.d:fe=bF_^}EksU! )l@[gL6^uBU,-="v1/GJd %M5|̅x9,O_?*n-Yn&{vs; gw?|t&'kO+&έ5-wwOMOy/֎{Ӏ^' pG_򴠜B!mVp?g<*lR 4BsLZhDpP/dI0!P8V0rr ۉ" H..4(ܟX糸buЛϤk'P+~(hQY;3z;<(>q+č~xp3㢘Q`9䗝mo0dg1)=O&vKViX s]aa*/LrO7CGǤR1vBx23V/m9u%H\ =0? 
o,*j˭o/QsZDNhz2wߚlr0m9lvAS w'Y+T]JƜ]eNJ&.1GwЬ=:7^MֹAO#_?j?J;n',z ̈́$ \ DLQu+?>+]V%d^!q0#d)a`$%+1|r!7Iu:glggÉXo{IMs6KMI^O -~c5je )mU< tAEm>p",hE)kOBdR/, Rs{n/tCcnh ysz p+jnO; @ AoG[]%0)]{G[{5O f .܎C;0M}}V0 &!LuB&Xb(R譝)d%+bIiI9 5YQ!^bCilF2blHLpZ?{q6^ /!<,jAR!3$f$D{fx3ƙy*$^W)%)`TKP -<0g6HSepϐ*MBK%*A8}-6^+™4D0 H0 +͝Ρ:Th3/oc38.Np=P! p[`g|;M\*$\Tp9< 9G|8~+$URNZ>z8 x `(&TWf 8)2KeO+Mg ?X?{7CnP-3eΞwUlXvqFvQ³yʹ%k3 p"NМ`,3NL2XR"kv/GgN)S˜xc*PRRPZu"CHgrƄ,6*Br҆b&4y(B%|c!S@Q&.jWjl82i FgX8D ,Y&,,!RzH x!61B7`Dx]=jQ R(Ha,+< 8IZ#\le D aga{[@c !Tf ՘r8Gs{y[T \7q0v7?i$+v$DđH{ )N (@" /9ydb+8O- VuZHu̎ K*؞wvA/8ᴌm (Hxg>)b'<7,A-7sR:{e&ʶd<)"781TwLkݔE$EK,gj#  za1(б t =)Pʋi_U^LyB[E8yy.#dζa˸HetHC\J0M@`Ux&\ik5̯BȤ' %FtkWlJE 9G9j5.CnӏzF I-&].)gU;6: ELv{ )aE¯=&gw:*'b45*gRq9X(&h'9`x5ztf&Tp:GHQϴ~\H1-41 8GŌz `)FW1ȘzڵOp2t&hz($$,zJٔS8 8m? rI'HB}1E__vJ` )h@6<r(IhVe:oӮo5H/´5 O G;H(\d5zt}ixC@+Χ+K znW,ix.J?:8)Cxˋ u%򔄃$Ms4<r( ?>I)򔼳Sр~Pp(Ψ7烂YqpFAhizF 5 i]e#eһ7|)csXaT|X+mL ׶QFyWwggۇ|떙 w7`y!oxY1$paZjb& A`x`)'<(Z3RxnXU7uG5mP]bZ̈́/e$)Vٳ?9JuݞORP95񜹈0eiHg V7s0bSמ[kR 1!GFɰ!D<{v9OOl?EIpSZ8#BB9LkEi GjkZE^HP`e"1$ BhJէdNr WnӒ%Za_g{rQ͕FxJxaY+APExCM2&G\q}($*d%Jʝ`Z=|]FDG -&=f>Cj԰R2&'f'ZOhZN9#[TſUt&qo{D~nn89!MuCxqYwRfR0 `|A5JH2茿r]|G'oww͍uz1x|X-|n듥xZ:YKҀYx)|O+5[F|/XכX`":.KcP~:Ѻ:iޞLX31JՑrEL]3Fs,FPE?H>+1%֢nӖfaU8S9"&-7<~4.GTtVԏF~8THՏ]DcCޏQG>dz vuC>4d#ײ'5! SǞ`y=lf5WN $jZoH)IMHK4ϕ6-.,ȷiNBҡ;mxPܟ|Jj W%yO?|-nOЌvVZ n}E}J3̦ }@٭S' <\mdb/;_~3Mp;iA4K^쐪BxGp׎Wa*Aze$W_@dKXGJW| FI”7;~lziI6&ݹ [z^Z1eD󺉄s Fx1@2}_5/Tūdr+#|btxc;q2 $Ɨ!VY.b%&Z:k>Sae۟>2"4!^Ye#l.) "SY""f "tT)ϐR9 l1߳/cck5s,Śy_S--W.E};{̍[W }XF @Y<|h_wү٥~yoQVRS4Ng.'(|`=XC,WT&{^ZW'oRGCOkf_{!Mp(Q;y%n wb]hh.V=kO~[3.-J95!j\ ;spl5lHMmV=YOOB6%>29wY(0kN|Yn4lk|%?AV#xoPPD~1OŢ@ב@0IנI FHgp0D2GS؎HI+lސ:GG2XLtQLDX@E FYjDȩntgc]TDy r8:ꪥL2Eu>0a $jKQH c͝]ͩŀ͒uˍqq/"~Z~9XI{ R?#5enǿ'1`U\^xADTWt!|6pu;j ߹2hSs燛_x%3 ۳wØsjzk1<,gS>}\!Ј8~LFr{wY}O1]wgo7WoP`oޝTJMT4NZ w zAJV4ǯi?Q-#s`͒=÷*܍}aX{N{h;ޚ[!nPHP”(d䃃:A%"uxMkyVCDHQtB~O0A2T.-!0'ZULmAZdQ"T[Uaf2U[YT9Ke8 )>^aǭIhy1FUcC@*r<}Xr]Wa`Q1YwhvNEg'NIO 0Dž}Nj=jL{rS-GS'LX`L&QDQtjqC #`@GAZ Ty7d@4XnuT2*A-aQ#1xlp asA)D5L95ڼM8ɘ "Cg.grL%Ns8ҧfG'x"-P[OpI="T!<' W\ӂ`.*ZmAK*1rjb&QHXX)Jb4&Vˈ^;%%ype!H BW!D3D4^)i\i`|(sYן.=vP\HڲGd9~\,W iP1TkN(ioDŽD=$!꜂3I8#[׳wg2_ֶ^}mM7^.?Wj/30Sǡօ+\fUDD;#Q0Hփб y%A1h$$J[!`&ѕy;`^<;:Jֽ!^7)M؄8rz574oQDK-KWQ`-]~`"(O$cL`>D#mMPX3;?AH9ap-{,sO~EB|,ۥ?RF*q~Kʱ",̲U9h wG_h̓vT,PY#cB vgML7cg{0hoef"К).=VbJi3x+KK)?ȥ.`qӵ*/-8MqS] h$ zq? M `)'Џ\ ]F-.׋,:%xIW' H !?W,Jb4[:t@qz_ס.)AO^9'leػ߶q$pw{%|? C6E۽b[E5nG_7lGv섲dN]Do"5Z"[Xh™O'Uz}.ydmO4wgweLʭ㾒OCXs$ %6N#Feũ#Ja)3&DX. T fX[@N/;^U_/ԗa ͚>@J(sc0ۂb>ȝ_*cFJ0N}&8losI⮏A-'xkG:1e:=@QrkvG~ꥴoc;+3:Ķ8uAkN62l>F[ KkqVz^<ݶܕU/EqЕ/yŔB{qxĠ5CJY2H i{?D@cEBER8ТHIՖ+$J^]LU߶OU XtRii,E0b%V}''bU}f{G{MR(Xy]<2OQ)ho(OT$Ж6*!դ|j<" ٧IBm^7G-XW~PpՄ}y̰%OӚ|Z+H%"㨋M]TDHkub#J!Bm#c}P1=Ȭ7?] 
Q,[!3 8yB9!ZiYP۲3REq؞YbZiv¸7w!ʙ ,M$ҚQ*籾Q} hJz`rās+>}~kC\ >@ K$(QRl݊Gk& jK=Fƍz[ *(WfuOC"6t;eɋ>cm[dpvG6Z0Uhv=$1(xeQLH93*w7q8OqDÜ:b1ر1q$VJR,Z2Of,l{ ݋Xcnlŷ@l,#gY@#N2ݣ\E+HµzƜTJJ23BPX:)_E/,|דn(\'v,\2 ۓf aak6Р?>mKU?s]3iω^?DvȺ޸?'WyeׁZv2h*|{w.\$&ׄwnDx?c^ڭyl-?m4壧ga4?NN$6GOG?"8'ݾ5g0M;cG8:p 0p  /dS٫WO>;{ݛ'g'.3NeXӳ_~~ߧ?~zW=guQks:_apts_{&ntӮ\ήg.}?ToPzYf6Ks,Ưu$c$ZL1ܧwހ4uy/9?8hs} mjM}3#(\/!s}󚩽W5}Ch9#\xe7 qd1ic}lt&%B(I N]iĿ-Xk\WtQ?yyo4=|ɤnlsgVPQӫh=pjc fLI8X )㱠)iKt17NP_W A dUCJ0RkMEB!H(+l vj AFŠ֭ibEMA u=i hI+"'"W2F>~ b&c~[N)'s sA}..Xp*X̔ sV8PkL+Mo) mB0Y%(7"ܩQ5b&=YjkG+1Kyb*8ĠKAS#`'4(LPQ%y UuZƪX}/c.*H$q G딣& `e %1!8s1)I QJQ% S+dãȌI h890S.@jf[5zCf㓭lϽom~h2tdZ''c(%urLw|j$m+Xlm?t`CNF~2MiU@J_?mOew;m(?7AviKi}!9b|u?0R0(is哧}F84wAi!cukFh!h vb (TP5LGSHrH:mC H(!^&YMof0?b"0Gx;y R9p:ĝ*pA48Nd0RHlHLi$ :LƊ!RN`' Vi= /2Ne*0k~nݝX1,sXdTRk3똠8KQLR焈4 K#3,֔TDR$\;>NTiBcSHxKA䆿 Rd tuwgʐAPZ5Aq s)xBѠ-T"@/iS`P{eUwKtAX* o+ͥ ` gXHJ60* Ň`հd\-,*f^ǢWy3O!%=E;]2Iqf#l|1*9GEBRT 2K1sJ--hA5NqӚ8Gk4O5,9!`7U7횤8J/o}o},ƄWQ]?hEPiVy %VˬDY .J>[hyWv[e #~qm&eZ8((:ACpu1w\_(J<ٍMIgjK~&uPG);"@"XVyhB1VިWڵ`q!oWȡU\P]Zt֝:$E5\_TL{MـýgC|'Y 9ZzL@kb{eB2;OJ@ N5+v 3=т&%BCcw Ve dyo洺t8Fwrtx-a^~{ ͣF$s $`mֱqus0n)*tux`9 N@eNc0σ֝;$l:#=Gۉp&8ԡ*?Zz: J]nXa.4١M _ɫ;;M9[]gWarZBc^"F=9P}%ax ^yvU&wlKm0J}DЅ8ķ=~khʳ09 3z@qIpE'K~$o+Ϯ-Qj6 0_piX}0-Yl49 hM3CQf<yXkP^%Ѕdjt^md725JɆ4 j=R$d,S+ ޥ.ā{GC|W} FVT$Y!_6v7 IHi)f*r;ndq&aX) 7 "n|ʡkUCݓBB񭟥#>K޹Z=ҕ():E7kZܭZ=h]7 S4osq0#}Tq\gCSwrBYЭHUgRc:kn: k^%t @+Iv՟~e pc5}̣(FW\c!h߰-@Rl$IDkr; vϋ@5ٻ6$+L6T"yX;uk6rPgY h `CFW9UYYYYQ1A})hi!WC2625j;gӲX6OIsO#AJz;?0v#Br̂ƑB"Oyx+$wUb2vOp(Ҥhb7))I(#F]4٤׏H_ROy)!G{GONfß`dndFOa8 jPF{]>=6Z%#G2 =a04Y@hxk'8Qؿ+$w>^w·Mp`ON5=e>da/b6{ӲTժ?M!ֲXQ+aHeO/_ve@eNx1+BU&V6प^4j,9)V*Ѓ: 23{DˠbR{c0ˆ\TS諻:64P뤏g9XjK&7DPdcCxnb[ vqy1=rJ,sK$+syfCvS0qqհqg5S$R>7,-=Ŕ  ǎ,}!ZvySXU1'%`̐kyǢsS'D{F Ϛ YUM2A`&U!R&9Z}s3t %p$({*s/jŜThZUL:9I߇ TV![NPw1S7_/ړ=|/hY։}ɛⲨ};`Q;>@Dn&-8Wlm+`s{S7q?OaY=pbKĶzvvV*A'0&@RrVKS%jk/hI1J)&emT5&6tS,֮K^8rBNiEƢ:kt+^*[PcL*Ǵ *???27͛^X!_f?]S+hWswKA4-[eOV%bZԬ(7fvf_kÀN(CxXN~lOX,L:TAW)j".[uUU2 :fmgI;KC*ô=UZpyuTPDUI 6x)c3Nڡ]8tF`FqvF+<P:aJQt6]joЉoe&`iMR5ѥogWgfU};9P:3B""ZlZU2ꬶV:z9hyARs9)'|*L ] 0O <~W}kFahc(MMuۿBRIԞ >BW.-19ZKx߿}^W=N#,NW`\*$;3'9=qfcd-~=b9sBCŵaF=q1I_~es1?/xj0%SҋxhfתC}ŷ^>L GNW9T| 5{ڸ|kmL/~mr~6êo&PxO<\ ↌!f=ƃ'!Uo_kzBj6fIu4(zK,p_WL($;fěZ@5Hy+^g=n0DuiȆH~Fg$Qt2ƚR*OtBz@̿oQ9"e?5[8DV79!\Ebޅf6!8#<WopGT9&^H\hGD(E ;o!s3{ooG o6vc(#ϳx),ӢhsJi}ƷMtmP )(o[v.DiŰӁk|RbG ˉ,zg={TIYY'HyV*zWl!3AHև>GE@T2x-)3Bւ>Y0Mc ] %FZx8>X/BL` eb*eV@c#dfTifC$fH/_I\iaDR@+;Ჷ3̫k +>ȷtZ/cgj0Be|BBc WqQG?ރ'utQt8NϏјɩATHJzVTi)oS'Ơc<&(tYL!Z Y җPnmsZ Ȥ|mH!b ٝ-Ks1}~p_w5R-)H3{a"/W|4a}FCm_uL L4sgJE*uW: `Pe)>Xv@S)!p3`.ŹI"ƄtqW#aO Rn~d࿊miVS `iO&QqGm UhP7D嶠( ߹kݜ8)NNX9cB7Pk'^IBu'Y`aOyY{8g5 mޗ AA@ A1\Dsu4|z4[M$=4:**$.|ǴnQ S-8eɛ7=1yӓ16V^^~?8]/ ǘ#4~=8KvO@Rtw7^[=p$&cgUGk<ԃcApTv_ݝBs>LceSc%lu2ݛ׎zf%UjN[Iﵪ*8xurKQn-_oRPrߤ=}Vb0aS{8g1cPb3}=i3unxg`xeܛo :7L/cxoRb4U>;Nd ۓ iyQx|7p] Vsݠgo C3ӷ+ jbX1a lj .s8GBqLsc< 6S , bJʽF{ֵ äɬ{עs3ɍE[GQU QJCp&@jՍA3>i$C[ ʡlN>>+f 6aX(Ls 27ɟSf0 8hKPge:x;* (9$P#Nb5:Il.C/䢘py 5.{C?RJE)a՟*4w9|eݤv$|{z.߿aTYY%I066 ފKO!g"B]k:.}E{!.X]q/'p_WZ,p{V 11WC&0Bt{ԧ8z~#DlW=t̰\aTsީ }ÚNl,㠇q&@ĝ̚eD2MTRϫQ[ҘS).OT |7te50CRްXja*BEWJ-v/` ; Ai. A喖ß`(Q>{|iMl-͛ ^¤D1\rM8F 'tqDP$66^qB3-H4ϱVm(/)UM] q6|I\/~4 0qPgITF u4˻rш EUkdEu6"Q|1}1EaL;IBNB@"ʤ!kBR1c%a! Z= ^6}IDZv`52O3Yc>gTi)kef9; H*VŽ=eawTEl"Sxҹv; Lcv#Ǒ~Y`U({C(lWu̿2*E*%]C'`0"GrnMjjhq($d #!(< 57ŠSWe4h$(uiSѠG!R_o>*rc1 x`cbsn=ʐYhQkoLȄx3ˇep_9:r7Kb&?[*h>o?r,ЃiTp=I÷X8FIr9ig[UB1< O1tClG"ĨG'#&5.N4 ! '/! 
w:J9dJ+*X0@j`QRS ul;NpDAӘuF~>/x+D*-g*o~B0KI\O% Xs@'e+{Q_襺e/ ձWI/ aa:;(JHdq-~Q}h>\lJdM e}JQbVۑ>r%Ye+ۉ^6p{)=Y:lEtԗD/s{F!#1DDC|o0 -{+bc*6_]le Ʀ`rJ;W{7몼^xjDs54Q<@66LOāHl<^p4Kē۫NV$WODeT[Y[=@![¾pѵ$K0! Uj5#BV̯е?ȲATuZ3fݐ w1.Er=6fo_c6L1" ?igCv 2/WoJv1LK2מ2v'n&64m>b*e mRK w4dڝ,{WDV9vDjٸH8 97 p3`#OU|# }c3'R7 } W[%|YKU/"?lʽ$DsJ~T'H',ZP~ϊo[wuѻz$ c=DՆl%ɉJ8,$,j96P瓍 q57z0o׉W}t!?^O׿yQc\"d@ .LZZ0` [1tCtщtý}7-s ǁS#'U! yTc3ה:1g3?{=2Kxܥ ̒}x g $]fMHDr T+F';bZd ώ8 ~g(*5ٍ2.hrњ>R/XN=E,:\`dۭ֓I/=lwԊ?ʛHJVJk Q G{n3YEf1F>߽"v2zItqM"((10dL~M\ } TdFe~']}̻ƻ򮺲wᾩ(̢y"[#h ؆,Ag#f^B#B'00͘:v'pHpd t]Zhk~{K<&umb.5c(Z3U)dPXm *ގVk a3bYIAF/[g-cLN1FF00%%`Q,oL,o+'l[:bNR|#VVNLgl-*ԊJZ^i ?z@;sڋSxg>9 kxPݞ,q !3 WKG6BeHg2zE\wy:b yCW(lc!x8R8Jp)VXq'zQc^![y뻺f%A$9rΕHY !˕Է`.R͡UѐA֐5 Eպq-,|~o?eX"&inxB9`5*URR慬,JPԍՎzs7<χcx( GN|ǖ>>16fF`b@#6P%~+Rʖ11Tr0\2SGz%*˚v.vubD3b^f^sōyk ϵ44Jt)GmO6MbRϮIrRGgIf)1z{a^8 .Jv\]__^=tjO;ra>;'!)>ԿW= ?XXe7T!joڕx}ܠڽ[yۺ)kl؊_mYjDfJpJhF7o~`g)wɚ}hIE?EXdN%}Wm2W'] d9}b][h)ñlA'mL:IJ-yRi) `LٔZZ 㗗X$]gq(uc/WWv箍NI)C-TL^+X3'}uՐ`:ktT\A6N|0'nÉ;@$GStRh>P\OH,_nnub;e[*фN+܁CO2a ;%G`8#AttrcNj$!͇s=^ .c滑ZP$aƋt2R-NĹ0(|'ja8J>o3A Yzs5Z+B'vfQVxv4Hi6$KΒr*n@herP8X՚ԟ>W~W[u$Q'Y2j+AI83ir⼅5EJlQ&nw.B*Ѓ^˹ω~'ON#򐵳5R!I"(ЖRJ^r'+E Z,QMJ|R dE빯@%jo8F)fW=ބ5VuNօAskeo:Qİn[Cvc%وK%ӝD;6x[\ဖھ|+Ўz|ZYěx1yU1.*l72:̙:[+L$WNEL2^dL?xrI0MA7*5 w9#2Il,YuX7MnsL(ZfYcxP L׿7^1l E[=v}TΟ{n{n{c_p=;o#GRCngLjU*gDqXcCGтePJݔʬ@Z t`E}_trX yTnZ?~q,7#1UUUYEckFJ`_?O 7FC`ӋZ).q/2 x[E Eޤ Z)`BtDT͐WJ%=H Y X*9g!D8YJEM ԭe\)u$ ΖWBq#caKDcjx+>F 7eVa$'*]h[:U 5 = ,nL1S ױ'=\bPQ[$rlF+:s.CܙωP*& %#o-̾'izId^M1؁(s^"xB3_Ɋ |:nYd+8"Ou $N()N`Lf>β@|GXv,>UqAv\z+n_vٶz`9xZHRHcgN `F( K3.ң>\a`f@N21")!P\Hd ti&srx%Riٜٟw2-4)G1"q^8Qk "DPX$y~.x-48(L˼3H~M3҃Mۙe-OSi~ycjH.exγMQTϴ|A!A^dXq<[XmUծ# ɮu;H9/tUJPMUzXZtಫl%l>tj0` "`p]_5n O'r+yǐ2Mm%ևzxrL9S5X0)fNOg V|y VO-wKs;n:=G]7nG&Oӗ6'P|ڿ/ Z:0*\X3Z0jaԪ-e7g"s%4N~-:<'&oiG޲g'#oemytm_O4TXY^Vv3lm&??2u5&mN.IPpZ R:CHbࠈeoq=~ahGP[,m^:lUfkqvsԈj60Z)XO1jINww{kE&t_:֐j ـ'bf?&#QO.r#L%3˥x :| #AѤ…C-t1p`}ıȠ46[ 6|A(슯̃eP[A'*7))Pf5o~g׾/;ZjŮN m c5:aiD4eYeB ϹVqYdSc#Zi\Prՙ%1j҂J8щLaDILJ $ MB< zU6֪CoYMK:M;,/)*!/OΙQ.D+y=}a+Iy݅]  ´J55:n\_Ev8x(cn#3;IwW;KO~LyP* \i j"16郉ɪ/e`q$VYr9 D0Ydn n[ܫU 0{1 !)IYB,NS>|enk 6G6C;~ta7]Hg߇I'XfmL+4en=}:b9'|,a={\ _=5X;˱79Z]wH%1 8Wodm^}g,l=ͻ]/Ա/8xo^"Dxglaj0_̞Y38&B Qc2YQ$?}R̨2HӀ5] J ?̓᷋!}Ӈ4| NN dx mPp DUu.7_΋n2Z5~|!6{mGCgED^BV=[v@6lY5&[L"10U_w4\tX; u̲uڙmŌkۮ[wȝFRe 9Myl.~ȩ/? J ?Rsζ/>S9P iJPG 2ԝ!v/6.CzK^1`+5wύA t ?̚f(q.Y.{ℒ C8O/X ִ# ;_j5~"]z\_aB߰(3<$mvxHV5\k=%RbH1ֆJe;f cLd!a' {0!a K!BLb$e^Y4k,I$4O֒viDPj}\o' } Uy>t(PBiXRq(p_Y0Lf𲾴R˼c?E`@Xbs`+ 7i 'uFNU?=.}ꀨn{=3eSI!ϩ>·mY H!+P[^ pr&_W:ck14AZ *?xmdsaQ~j-iӥ0OH%_/b6|*w<=&+RJ#<Q,R$SAU$;Kh5ţȜ VD(D#hN%f2*d5{Z2^3Zvnz4]tՉ̉ꐓ?Ps.uu2DoMcޡ$i 7'J\1%|)9^mSk #BHIW6UTb_ / ɡ:':Q8f9 q@ (A )8c,u<*\<݆v\0җsTbE:ђ"QJ/ռo0Axc&K'kt:x(5xx%[f'TS:pBi!ŃYWSW3 0n94 $L$ qﻪ;N&WbVH`{:8I2+TveSVej4/Q=NWw/ ̟Cbѫb: @y%J6+m~]Ӏix S@w{E4:ˍ( 둍`Dpy!Mv~=}E\P1MV4L;<>lj1 v W[cPWFnʹB ]a~[*$yQ V$9K9/TBhl=zm]QvA;`̅0.zwCV6%qHOgfoX]!C y; #fjnOV^ >8<%RuT#AMqrw슓[5-,/[1W-9 c-[wNnsgF\$VtN]]?řCbk[$睳}q iszPk[,EViVyyl%bА;CQ;C LMTqcgy̠l:β2e6:3 1X'l!襻htBcN9b;& u?S)?'"8_N.Bp2K9_DӤr^?;! RţT4b(Ap,"e<.8:|6veJʮK @\zwAڔ̟'{ w rlԳ :dXqO:xR/dXs\g6< "2|؇7%Vme7  srpym_|X}$OePr,m IZr20'A~&A'3o\KA߿7okmXEЇ9ݢ8i$)6a#K ) %)%)YJ]5E.wge/-ɝjξhw-8i_Y(NYc0nLM F 7/OB:2B[eHP̍mGGQ)#~C{ ½}f' V

V\PphPSr@!'nK{;`gT/1\ӈk[Lwl!:& ,yCX.V`+@]jFxNZ1AK7,Hl{&//ۭt8I=khLJIL\&/WkLYLAB(!CsAdl-[ D$/.ƼlBDiMzBpٙ)Wc7,qw\uͷHWGv<: =9uDnms :g/^4܂/w@j|@y+XQ2~ ں襾q* s7&usvLFYkj+>s)2銍}WjV o[ xsJۧs1AƔZA چ+vmԡcS.¡ygx]_c7;%i{:+ެ*{_\ol.HR(6jH#8.O!)n@+*XSieVWj^a寧ąfC>4'V8>Nɗ|v}xkODN iIayr₦4qUE;C(VJwxGޱIRW[r_԰U[Xc|*?@_LoOw٥v|v&Y;w1L-v{v@+f8\,EOcdygsDaPTϐ? bi$ /ւ&Sn/[[^6t^@BK@lO/^y`N2uD@QSO?}LOLAU&zn2x=]W$^w~6x-A܏1|4ߕ}9A_9^w忿^]޼Holcp<L>]=i޻<} HvC4p2LAr~R]lÏV.SgU 'iɸ=-Ïi1Eqht'nS__*~__^\TNիL7ZX#;:#5dT4{ZCIg+ rYG O1ֲ>{3D)_{>H4ˆeHd]gfm5aAxmfn2kj^~#fimh9 qP椕G#U>plg̙qb ^|o0Ghřث:w5s{[!4.aFpߵ{gslOx ('tj!`K҆ZR*" 5 ܲ@"`mC<+X`%݀%å棊2@CT16Hx._N$?8{n#i֔q*)y yqx_*"[Oâ y:(Bl6)> xGSư3f4?d6şF-)zO`FbogO=SM(Fo5C霌8BEyZOJQBGF1xL("X Ci 8}(zWTP(zѽЫ9٪rMi-h.qx j5q_2P$4 {*.d%FPWzM'Hsa+)S~ Q🵂cglFBZo2|Aɥqh#RysO`dX(& j2 `RHXD`uxxRi` cTEK p);0=A5linzn3|9]#_9% t+K84x3!*ųbMrc8Ybg3#(f%o䦎__?t\| /r^zw ٿỡ9?goY/+#B3 |@]cT"$R+c`UJ𮬔$#p3\1w9Go$*UڐYBi8:#N.tK).jeI[8;)qv @`mzjS5C=f ܚ^yPQInjT`"aeC4S4 FF` 2sP&T aI?G[o}Cy …6 u[.$moA6I:̡r-5)8"ts"|wȟx,E5䘀rX ^Bރ zyڔ\ZR`](>hΌQh]~CR$e"d*,u%uZei9bv>77nv৥4 QAˀ#N} a ]Zk13*4\!q$ew썐cJ-ru5|-T lXϏ r= Ԣp蘄Lw- +GBd pWPNs"0p,2L̄>i;[> n|K&rV |4r !Pp0#R-2 ] 1dkWqa;pqRh,!V+b9) BBxFzn|ט3Ց z;jP$գG|pTb/B+2ѽpRL5DwG T{ZݍlݫKRԡ(J螣jQGk=c?01]c)6Pac "2BpA4$2"IEH!lqy$$A~2D Ԁi P`} \$|L#WB! +6FȀRƐO|̱`ZC$RYzNWVTr9(=P#GK%6pu} ӲeG` |#]3rEBt F tȏK@#Ϭ7#?x >"Xͨ&Bb́\U)S Ǖ`G?x}˷?]@SlKNKf(_z !pĽ80Y7Yq$69e~G+?НiNtt8D-z1ktG"q##˧> sI/]1h 섍BEx~`a]2oi+r.G 6L0UR>爐6>WOQk9I) l) ۔Un'4'͡d`A\|bOsQB4I5tn=9 M)E .ъ%A׆pFmFnV] c&Y{2xk jЮ؊doN>VH4KH 1lO$Z-c\KEh#ӾM)]Qb1gir…VRtBҡ;2IA߮}vL{ 9/W˻@!-E!8B,6B/R֏"ZIBJʻ@6TkHƗUqX2"ĪJVeU]dXf"[ cK MCFaSj%*du(Va"iAҖ k^6̵fKJLK+ieR&w$Plyܞ'+J8-AVe/*89m*rjQKLJdT%,};.CKwn﷒`֛Y~ VrPj5~w{ %".!@)+RJHyl2 3(?7f{EB(RV19Pk[ז C{@7\;u`tE`5j0}٪vQjJ^/[A %9ɋnj50 ?=LSC< a޿sr)s6eC;|3xg>ezhyB`V\[?~^ xҼW#kr-@[σrwZ/x?[I+8ru80\|-W;'WqGg4QërtkCξc[^Q)[#.#s G6@hr$\fsn_huouɱ Uzagzyj&y3m3s)r5IUjFzhvS7VP:a~˜gw'<Ƶ68*wă[Fbߵ!~*F uQcKyhc l h4Eq[Ϊ~>l˒dm} v|kX@i|gCc]*}`\y}Zc0Cդ~5a5$s|~{"zV>K@UoCF"m\w-oFR7^ִ,[=h9JȖAr=n8] )^?e26fg7eGJuEcơ4ݭXcˍp5:6bԒ$@ܲѯ-yvyt]RT2Bh +(Ъ].t֭lW4<3~Ѐ J:o?"Jţ'2Fv}¬2h!;'QՁ'N 2Z?9>;'ȉbn qs9p}n..SHy]n@D%}xT6s [i؛0 5X% -% CB2qɫ^Pe+%-|Y{uS˚+䅯/>~)N/fH=gIIju2c Z FI2>v%)B;t *q])!H3bBw|"jsAS(6wi[r;& 9sw7{^T0W089^?>jUox p6/<71@g=R>N#X]q 6#FQk>ܐ6To<ʒj `HiE&Ёo1\m[jVշ1ˣNB*vB.:)RzuF]g-d\{dmz m#e$B8UP6 Jl%P*C+EGmkjސe_7/m<[eQEm6f}PJ Р2(XW0{bT+KW]iˉ 7WfejCy*Eo+AmcTWn6k>bΫz/*[vhQ2A3N"4Bj&CmPY $KnPX] bT#65EVE4N"DvBNr8"kaD6TH ddBx@XӒTh)9ŚȢ:b]htZm@ ,[~uƿ/g?`0PF0{qЬ [r" ZA Ub#*BO&`4zYeJNK]"J5q>g5 ?U J @ЋRS%H&&bl} TqUaJ+B,Skq)z$+}FO* Y ǮJF"biIs}:&~$P?]Ε9;}@\azQRds4tw6Z!M $:#Dxapo{로RD?mx;0`({e" %mI]%X7~%`.i4a:h)+=~$從9pC=zuDE`CMpmmR c9yQm g1OYe2/ n0P+,m$6xlBj RzR/dSC׺xKY32S6oC%eOS "ķL}QgtokA Iℌc:5%ٵ/J44뜑kްw'_q44_RƼ'Zv BE㟰yB 및Gb ⻹Kړw>>'T->^ۈ##جϋJ^{mhB҃3xVpLQ#< y/em=is'+*"~rN^DF3@ƛI2M2lI-ʣFVeYUR+[*B0^ۊD w@w*߆YKlG.G55Pڛ2 -N' Q%0JI㎠S+ 9Ѭqw"XKP5Ts[[纡?;jIhKX" V0G0l5{{\Rkyۯ7n 3+EXd=`nxӻ?_F#茱íFB!UC|cp)Kd}X)fl^Q anC)S-dž;ߥB#;:OBSQ4DjGa;P)U.It$ԔHP^b%z7HƔo{15aK&4SZF7%R% f |:tږl2Z5K"QI}7W?5#s/_ɿ|?/X!-z-[ ƍ-!ԛՃyͷܯdvnNCؖHp}̶YbMxc_|R֪υGn]SpNCμ)ݺ/;mF9Gntw n;0i8mnv-E9.ӧ$?n$Gntw n;JFnH[r]4O!w()xX BN;x])22^R3)%=2RWq{fwGmX/ՍgE jSʢfeեNCmiY_EIqw;_4҂PByI)RF}˴V_/f*%k0"Zgԣ2mB !D xB$J|ZkNNI6B 29Y_Ujv٭Cbrk2`١x\$غ UsYaPNX= Bȋ:Y%6UJ$S>i "sAWaf&*Ҕ(UF?@i'hYzfe!z'xWg zOa` r!^lqP2Þw4S Tg4ؔC1 yU AITHVf5(QaqJ#4EZhLYLS"2yL}-EFt(]}|ymSȺt!J2~H4NO%Z3ٝ3RЩ]!= R=|WeވEO~.B_`D^9l!>{3lbS](qč!OCAb0RxtˁS<;ljCNQ83:JDbc˹Q"edzQ&̍{58~éGx 6m$oHkk`wMNu:,ﲹ9 @Lu[=:cQ3(!]VWr@1< "JdcI\6YAk`=k'Bƒ(`~t'Ճ>9g>%J9= \$r i\]BkIl\Bgj{6_.hݠ }F}4W߬^lZ$R$%ʖѦEgvvfwv6o{8MgTz%?2 mY幓eNvWO9YZfOۧ]UIi Z+[kJeW(S<TN>4k6M}B l 
kG컋Yvj,lwC!2 fU_uzR)1\jP+Xe ShIR"Mi7sJ1תҸ iLAĬqwqD)RsxY|T40nO%6D6`E~3:A(9f $ ݜ _ &(m8-\$۾Vc/(M lD(G["Hַ;iVz3UG ݻA00;wݗ#;LR$t 'J0|P Fh̝ZfMi9QDA?sԊq!xa Q5C=/(&?C2;X\L4Qkpzp0f߿ePZw|ѧY \- kDt&lK%4O WFQ¹M!)oc`BU}$~^z[J PHb콑搑RR !3{CA *'Q'9FT[aH*-KZOVia5h"8(02EF{{^LM avm:g%-<o.${c~:}~ׅx4`^_tSL t>'O~Es;G4 G+=$}Pmb/G}S gh@s6[*A#M*S΅QVl@AҶ Rz;s`2 $tJRn&6Eq,ܬZuUAa =fj}u]E ,8.#RT߆7 \X *l-BTV!c7Mud1A}ݵRw1]oFh镄{ʅE^G!g[T|1e4dso37g%7}6܎'IL*r-Sc*>3L>36Oɬ==I?7h9)f{YpY:5t̚y4{x ymj| 0;GzБp; ot.ړGh~Jphw6=wb^Y̬4b~Z*;blzeSݦq9|-:+&Vdt;$GW/6ETduT( [oَBCL$& ʘtA1pf&RJ>PrhWBrJ{7fhg 󖿆ۇib p1*ƌ^tLUz7f;^F@}Stxܿބ=o29z {Y'OL߹5/#8kӕQA84(zhx䒒DrBʜ&hе[DiᢑˡI Jָe\z\n+ %Z.HIm_ZU8m Yq>ƻj U,lǔr"טs.*MNMxSYVhG&sN PD9{Q><$J>G?-(>bdMEc5kBls4SCs -4!knF/W5YVJLtqMx:[1;Sq3%|O#B问8KVƘYLK'9?Mn߾\nvHKDSYF'qס"^;P3ږ |gud|8m*䕯\趥\RYu퓒ZHEڲ[1bi;'k-s4b2EY)Q0}22W3:e"}Wv@`Iy&% ^fySfCᶭ~Y|X:|,>aJΥ^`Z9nBw6!Ѣ"]jQCFRRLX"J?O' #sClMI)'I=f^?z5,n~v(ښ)Yn!qԌ_5=2%zdč7="T_ sc&kQEt(߆D J=2גkioD>f3iCg7IyvdD7[\.% ~Jx8M[x5tk@pkN>޹dˎ2=H+<^Nt+y1vTʏ:CZrE`j[l`c&vXL^}r>M?Ң 8'7_ 3-G_D5s{rւT>ycTOyo.Y(ǿfNCޘ`ɀ/9]uLܗNxIϫ9Li~;ҟ}7a< )d' }/ֶX۾m?ofD{E\jaܻ4gJFp57K' 2R Đ`y)4 >;W(v ZfՎ;&fr?-0|'L{b6ٙ| ?r1|=^Xh |68e/Yv??_v G 낁'sz>O}6;7?`"Za0ځO~ 3vJ{H1Lt;sbyEZ"B^jO3 pjԝ7:7Q)B+%2݋*HV@t)+s)AiL9aJRM#a)B-mnxqWhl);3n#E"}/&b"}/&f*b PP> ie'4eB6Pl GU;V|gXd*XEb^`V?"3lKs 5HOFЎ֏Uҏl4(A GQ8g)RB,!Y=lUyTx$ w1׹޿ oʦΌ|-wP$OX.Ffo: W4G~ ao A4rs]>FP .PՕr mlK 4Ö ȕu7%m͞Q ݳ.o 0.];33=p,:1:39tZ{4{xJpC4ep9$8\=i\Kc{']sǑWXrKCUtᒲ>SRyJA!@JU@r؝",1LOwO?v%kX=6'Auӻί^tԅ oݦ>>Eaa98(הLhh®n3=EyU40 ^|>q$n1-g--8QXGqM#UΔpYp{F0MEjV"8#^ U^"&2i GIZ%Ta,8aXvъpz9e@nニg[B$1_LbKqNa2h \X8^ 7Q)@b sq$ZxRƻ:ۛ6 c3^xl׈.I+EJXhhG '6 (Z/| 3m*h+/T/ iBg}fԄFdo\Y I̪m*l VN_ v# Gzig&:AĞu)H`768\Dڢh3[nIT2/_2q&F"%*ɘ4m VY0)~I Ӈ?g9|5ofSf7iYVٛͻOԔ"|QR HaIDQS 3J8K7!E "\s'h&ZFa%R8 A9Lu2$dKna ,`\K'NF B =Z.zE0 \b41#^Ifiװ܁G+U@.tCKdl><ΗYݻ.*ed7z\}~U ^3=MɩZ`vyq7o:ot_ED/ERWNbq Ż6Ih\h`~eZ%J+CRYŵ1 <(X<BYa;%HӢoSR\unò朖8 lB8FqX #; l-8`8=c74ZF b`R+"X Xĸ 9hةK*!2E > BoWߩM!HizXaf!E\ BtB VBp!#G`[/Q#C~"7># ZRV6bw:W>"B( OCQG89 "0  _Y/D&vH 蔒#]4D") CJ'aa&a_7Q:[&Q y)EP;Q^Э{R6&Ӓz j i炠hCck؉7TZ_TJrƆm)16`gXuվ4S)IT%X~΂劑bm2MIH͙Du=xR#r'I[UUo_\mtu#ȂUii2Jl%*[ƈz=WQ"-aF9cDZF-g|sPQ|Շ,<&K8kpSꭇr܆= a]=tFF e;?ļgeĔ m. mGz2*Ն U'( ||p=CaTF7 HM󭆆YHbh\,5WpoXG,Eʥ o^ *!7Xz+7aWS{I=Eᐢ3eg/ ǥrgU>%B>%֬N[R^TJXsLV^d#'Vhs:8(т,"u:, m"[dÇR4}ܣIJ{3qif?K'[YfwXTqRtytna^iE~H~O?׋fP+CZ^Og:.Atet׀_n;n^؜PwڅdCMݪT}J@)Η`YWZeh}zҁRśC{oOZZHxm\_4~K S$P:%xB #(DZo4 {v8 X%+繫82b ȀorݸgWT=bI^0h&H?.EyǏCJ.L^1Zī#{mO;F<`O@SxgB." 
\)/6poVe"4?̱/[^5=v$x%+龜ru] WοS0U  7?O{kv_UUK`KDxߋ*d7,p_v>n:;*RH)Yn{ K7ȿ5V^e WTa}Snj򫗲mt50KnzkO^2)ht` 9L ꜍HA[YcJDHʌR%a12U՚Tӣ]f%oPG{h5k×O8!)=v܄K~qΤ\ _BT ݏ/̖_-?``{P1O+U=Nÿ,o{Vw=Hia>T$(DG1&ԷE6NE" at0_&0 ((:U"NaNGZl^#XtX\SHRyϐw!s7ˣwT9@Q~b0hɄE"M1v֣[|3o-ct40Ȇ)!3 ObȬ][-4jES;F:̱+}$4p("0`&dA Fc{X0lI2%Eh|MK?OlX ofvX7aZLf_^X%hLaMnb"y.=UjNjx?l ?r`/=.`Y.S#u6߂-Ku#ʃo'MIL/}e~5eZ?\Woer&e0̮ Ê [N9]>n.`;@L y YV|54Zp!w KHPSDT83҃&H/ I㜌c%.W5@^-d;w<,D Z҄Ƶ܄Bԍ-7Whʃu_ӌg1YɔDiTbfI*+Z@p$eہ;8&oxyB&0o^vpÒ) WWÊGA_o2: wrh_=/䧇TQׅ_h}m>| +yQŨA|>dlv'Me<]SdyMT *JÚY|ڛw6TP*.NXJ]7MKqJwpޖw}kfulvVgWRyN&xNTk{ODi S+T5w;S۶(v .vx8~Aڼ LR/r6a39v7)Lk45ǀ&˽"&]GKm5 LȂEE"(TID>;!qEW8p|Z\)6} 2Ǩ!j+ b$zⴊ$;F)F9J=%-O0>9w}tؓgw$#z irO}|?)I|vQ+vh6Õ㟿{;Bz*ϟ$?<}~]Q>JwxR-|G~ ˍ*5|'W]~gfmq>B©|?sD_}.8=T'#EcEIQhQAD nWG:G.ԑJ%63o~lf+@Yؓea{?o&͕xGC ]qc+p53nxLj!gBy>Xko^}e3U7YnWo=t<Ōѩp XOF_ 8*X|n3C\\#3 z\,UpoXG,[:]κTޣsZƪaމS'(E1ȭa-0'&mb(0*cKgּmC9\r.9߬XyûX [Y(ORY#b.=W(Vh| H7_/iVϩ킳nj|{?x`-p 4&r5SY:.p5$ }%"|MYRsxOqj; ;(u?op:8 Kb>M_^1L&p3|>L`0QG=YKdD!Ek=s@Ԃ8Fp00j#"nŗb|$v1ہDd2GJk[RmmIUEXu&v.%9 :J14NFy:xԍ,uZn'z~>ۛѭ́%W/[l_8*%oMoG/cU  tOR?Zj j5uRe]riY7:[EG MXsx>UȖg_ERE%T`RT/P'1 !چkSυp[ jԃd?j vP^ڂ",a[a'10%/߇Owb%!-c=(sQ\%/nzB$ 񰪩!i[S]%ު[3.h˝vƽUZyU2(=0p GTE{Te *mX PE#Tɨ(SDȐ)%Np(.U,<Ҵ`8m@,u\ê !A o_+.&P"ݔó-֢BV+.\7U>ѾK8m._û%c=7;pe:w:V -wW ,9~U4 iՙPz%֛ ӑڜ[>$.e.~aO++ҁ$vCP 4;ـZ#!2~=en-b+أw|iD~.Gq߶T\p  u2"!F(E--|xڙ7g`U0 m#)<{#΢s1Xm$hDD"HeT-^V2Nh_7>&M w<1 i;)-fŇ]9վe`N{pAyDV9H=79/WGm˼$;CûG~Vv- Ͻ,߱wo_(ah4,^&iXss& Br(I7IMgN Ez!\O^W> $? I ?,-JRn^2M 8!bi~#E 8xȤ:NN+ ADž*.k:U>TZU{zZ"3DZnb-_'w׿T^g/Wp+ZC\Ra`TP H;jh=U;Os[QęBqcMqbvHEK6qj\FSLhQݓzR A.٬I5=XgLC)*%2P4:)Ihi xA-%VG @rT-(ϷZA|Ѿ2O;x?N9NQc[iKP i-N* /~czuCuVeԲBod+c rXwK;27*N:>99]B5W*#9tz- ~JȎGVC'7ex[K ?w9>ۦk/o΋Gao?4k &K8=NdAƏ߰zf%E$R, KP)iچ.5YnEB1_~Mm{O eG•}r-sj`ݯ'x]JlyFyYy,5:iXZAO?IL:om<Y=v2Z5X+r[\bH9F@ 9$fK#}dP-Q|\(}L֩zVM:֚-#HW>+{k4W'̅q]W:TؖgViYÃ=gUiЄsۿE]_M:˭1]#؅d`J0l|F\Kl;0+IyW}Ot/ha\ ϳ oIHiͼP*F;@ Jj8lc ۜIIlJ ?k`3ՈwzFJN$(3Z7vMpAT)m.)txUnd;8ZQlz [Vdu1sJ.O~Ka]ȩ3J89ʓ*O(a͕VLCQ;(W! i 54(=E P9 k5J$ڜZp,D 6h닇\?~_UKQ]7f#Wş[(lD$tQB6z kBs z mEyZC*"Ig,R!@)מ)(IJM42Ȝ6ٓi"*Ů1Ob놨&%mo ƨǬG~}ay|h%Ϳ~B|RG*ݨ Fև~Uhu@_~4b5%' Hԓ٨TM Ggz??!'jY?!Q.z>@3R]'esav`)w( ^WJ\Le2X҆S VaT .#25QuW~`w*n;˘9,mjǶ ? Y˨볽+%۾5X}Dtrx`]6! 
oC*a& $_*ګNi@Qh]1~@j꼐g1-AN0&WJ0b(AiymT2؂&+l:J+ bkXRUݙ5W{O ;sa0+}De+Nٜ1HTi„R=ǮEWXAE>'"ƈsX/G.=xDž,hpQIwB ;c\Dx^!(//* ϡ7'GO_@,agQ9mxIfS \z^R)'Άs%yYӗpx)S% 7dKzq0⒡.'?njbP [LlS̕$l &4A Cd Rj!TP,C`HH!hi2HVP^BYCxW%|NmQZfr!'eނ##ZFBP-H ӜD4t=ou}τ7 j=8"g*ƈ4v72'X9E 4N' qHTv0e=&Cm\F88: 8eٟ**y-YIX";G)d-|Bl5:i6ZP 7!;Q)Q2ay4eQ4qB`1Ip ˸J)YZ:1d/9KNT wMB*DͩKoqLd(,R'+ϸd^kYqSIQi4rkqM`rvky|߷3|d@nbהGMhutEq%bA_l=ݟ_ >+_sJ¶dHz*D3^ wf!M0Jo=_)ZS4h -l̫ufQk,Ug˻^/JMLua>Vʸ鸸}wPYM Hr*\D}Z ]@\7^ș!9s2bٱz H{.g V":Kr-``,&`9bc+3-_CG"c;uCI/H㓍1,L`G\={KUBjHBs(vnϮT4K Q4ig|Z$=}a:MsfFϋ^t]e2]We^7MCq9si !\5~buZ9;h<m$Y/]*}" ,&Hl6Ȓ.,濟jJ)EX$(vW]nl9 1>8ejZi[s_inKT6ݭCl4WF4vN1c}@Xv>nĈ/\ XԅX`ӥEr(ﴀ|pWZ+ʥwv|:'$d $Iݓ?8zT"b8HR*NLJcjm lсWxu[H)P vH"@9aS٨̢T$)e`aTl׭"MFv{5_iCW ӂAvll  P&g<6ҊHњAۗQq6u d[Jl\``oOLw]…lQ(e'9'؂'( Di"z>8KpئU!$ދLuz).q҂ Z`1T =<}co?~p{@u1Tb=]$Ñ^QyĢe[AX[RNp ]'-'@}>iiHT Gҫ?No;V͢ǿ!5 Ka\^bjt[Phv> +)F3LOAlIAB[[Ff[|cR*;v/_8dcАsN^/NZ݇_>d8Z@!)ځu3󷳯-+pc~sv͝ǝ~l`hxkߌNcE'3{~w'3Y %9Z YXFSz=9FY뎹޺u @?Rd]d>r&;`І+w.GPi2:_'8> ~;˶?ŤD0pS+jd!ԢqTSu5웟W =!1b3GÇg( btZķUHG\:yC{pc:S~Y!iw2<_yŤ$y Gش~ {}o S\ٟ# 3/}윴INTK:ISme381$7a-~yx;pX!U1 >*ٵX\ #H25E{{MGw[3kC.ǥ,N}5bOͭ]_j?edv&+:( ܈9i9JϮbF)XȜ/aEN<+%As>,X imsЄ'3[ΥIL]ُ Z1J> Һu0K?`.Ԁ>[ jq9j}Mv~iNfi6qvΥB}x}߭|5{<zF|v?Rw`7ךP,=Jy |UoOimd YJ~a4/'7gr:^AL`(b~[Hd mH MB6vJ;?j!-1]RfѪh'&>b?g^Z:h[v~vUSqgUOQ \Օ DGy B/_8LRTӴXk6vɮըs:˖}ٟx]=G~NŻĢ+;Pk&<%[xW{usTj˶mzv!p7kVq[k}j[i]j /2ՄhqpR9 Q HLjʁBI0)6D´JHJhRju$ITA߽ZБS5n{:Պ"pڗtk[Gd $YO-IF@^3$Hq"g:љ@ Yj2$ q$2hOMhq=[{dQA '6}kƓ^ 6;nq\mzbGغ-.-lYI ?o׃7^x=xSԃ78)Y pG"B\HfJ`f:KN8,M8'.`F?|̱F\GYޚ9mtcވ27XiF. _>N~RxG|v?N]+ɧn%ʟV`3 "c3yj2,|7~"L1 um)Kh8Zbm8^7gKooLm|+wfի{35'6CAPWLX$[|XSyCeR=uLBRemj9 NVp&$IL`^LRTHrNjUy}vhqq-hb%X6 ^|h&-3&:2#T!Jr*ڳr`E?f]X:th[woC5?*6繱.a% .%a0(I4V1`b8XH"8qG^+LZW. *5 )Knt)f- bX5pνñS9|f)ZFKH[t>ݛ~Bydg|3Kk)i{PNL8ctx_4Q} B>oßADZƩ#pq:2|΃9Ŷ HPz>py>G>[OKоV1ƟvsH0:7-!ɑT(22 ǹ(VFeNdfB8*Xeo-IV`3i=pRbnmzj/$g]<].$X&_) pBNܹ͜qr;+BTJW! YZI/BƆ]rMDb]XJ)y#l_,{ vЬS>vK!x&L 4nj)Ģ1_9 ]ŠsݝQ`0gKRƌj'"Zg aށVUOEX#+Egpğ13|L" acAGX&iKku-!֥ӹAFdaC+\oii*G3yP3ncO:#SHc.\unP}!(cBQ2-l UlH5As V&yv<Ǖtr <kv&#_` Fa*e.>K%/Nc]HQ |p_5QbuquQuYm7BGA/nV/)F(I5P>%Du^۷]在sT{ȟdpc?E<:/>W_6}'(ʅ5Г>ZL='ntgYI ?o1o|>Ǽ7||<9U*:uDkjK3F BrF1.rڱ 8c5:z7/n]Gzj1\Gq23Z~q iObN`bkP-8#@Z(|@kq~o|O[)"`K}I!cVD& ̀ZLQ 1rSQ,x]bӁŦ,M ?M )RE:3aD))eNU`CT2cܤJg:*ņa_x?!'Q!@ 6+Ba2Н Y)*Xϩ֬R)2{.!lBlI0.!!`R"4lh*YL D}Q6̅fͅEi0$N7s:s.NQvM& * 7SrlQ}߃"(]&[&|3~}kIߑwo_X'񯆏~߂cn{-q/7W` Wޯ&0B^_8D]ߙM6}XM'\00dD$ٻ6r,W b~ F4;iLON Y`+AgPTeUT)H?"sxn<|P3^ $ehfJ1y+,[bXEVT)*Jɝ(EkQUFQ#!x'mL@Z0K*"-Β[pxJǂ nTP#JlZ 7i[%yF]]a/ # B4{ 754DiI)Jj wB-w"l"S3x@ lDPRFJdҩ~oB ߒrv[vf0 `%g#ϵ# AX))cPEXĕ :dZ˭D0ƵB! rLgh,ʳȑelL3pTk,1BxOh +*[Rr$w+MJ y$eKZM1[_⃠-w1)ub`9 @*;x]Rp-6=8Sd18-91YZ .A1 ~sg'(]=!{+G]G%~loK˷v|^\umMiǷo`k+D?.^l X{_ucQ/w!C]U^o {4ۄ!PLut*bfH6S \Ժk|w#7-&Z)0`X t4S ÃT-]]",Fkj>E]P|/ZQV} i̗Rw\܀Ѩ &(x23sȹk{?gC ɂkCY+ RXjpS1+eC fr](ʁy?L`X-UX+c)n\6q&A Jf9<)U9Q4&{ZBag5H߾΢,Zg h'߂*H AbVF/BLO(+7P 8Ry vc4hA2,hZՖ;L"hXO#*F9@s΋u=A BNctdvNםҎםur:kM60oԅ1˵rF'f4jՉ1ud(E](gs;"$.M!S˭x۱Ep(ǥk?O1a_Exapu1 Ӛ0v٘h_f)jEtJn+@|t\ktf;fc{e-P1i}p"Aȝe'IwLX0 z!u!iCH@BBY9IviM¤&!$LK%\0iI4Q @jt<\N%r'St5l (JԬE[].n4OFج/C3jC WM0rDk@ #U.\%<.NBS\jnbvxZHZK)j>x[NHSZ`eII`;`q϶@H>Z=~IWq35LEՎŁWK*VTAlGu"m YГqA~ɲ@)7|byP}>Peo8G\Y ޠpTjd.kqRՌl\!Q ?9f=ZMuŒIZg8k0 uUw/(4W‡=(&HO|ˣ+@hT+H'WGJ{rT#-;ݤ[tKczЭ%`Dk\xskU>Ϸ },N8ode+>M Vm6̣Y@/4< ee*Ƭٷ7-UXs*佣H D8*cqZP"}ËJK"GUa4ku{ &e %uCGJi.`;Wv!k aujfׇA!Gk0>\-OL@:^6h]IE7BTxF@r/fjFrUwsRP;+}-Wafꌵ69{[ å>ClE'BP% kP1C>]3prPdA!!r=Ek >=W~X.woc g/=3&hx kC;^dz7"%EN"4-G4xP`?FK#Wӿ)Z**IT&B*6Z{حS{Cd=E:X5R~ $ Dﻡ#)(#=6 emWo2X@$/+t{! 
Jh98Ak$Hs>0 k(CX{ U1'zi ,e|/ /07ٲanB2p}& ?l{ ?Lk C5%,Y5M[ȄMI.n65lӆbKqطƝ$ >=)4Q=.ΪS#9ee#*'vHmx.sXO)bxbkDHICi.^|I$ICƓďJ gOY 41̨=w ,*&3C0[1inKj`)C+5w,f%z`MSͶc϶# .% nkx}kM6+Nhr։ZQk[QZWZ`=Vr5BmpUMY(i+4z> y#bbVmG''$ޱG4P{Mɼ^L!I+SSu5>kP=U;QTرsVjOq >hQH&vqxReb9J&*q>ð}+i`Y1;ḓ},oLkF$3O[ 0$mCir\ݺ{̂-]t/0*"zDF=sw/r>*8 COQG9y"'plqp5 qc5 xdu5[ʧ)G E)& ,$̠$5=wXni3vטq#f,T#Ȕ12p5HL+"-N`//L-A1ĆE[tFLp.lL\{ PscsT4=mqoLLǤК.z,í eJ|4%u:99bJI07dOw%z?EL%됄kc]Ibtz.SOFvTXFzDNSnI $JB#DrTF7k*FZ?u)ikHVr4:') ո\br091y58Q>1(ZO 70'J Pd' R: p;aybݾ|3Ze?v!ׁZV:O4HR .h@!,0Dr(NpE3MjrKݲ \} a~?i$CsC9VxAQ+RjATh˃ wD"8|;ebmm0;mQ= E6$F;s"pO160$Sd~z9_"i6_em9\Oqe`F.uwKAo>DrXx/?{ gz_r2,Ef,Nd۾$lYrdy&sֽؒ[}Qk,$V]X*VOnKO>-LҦI&E\Rk1pcoZ1w韟&8򷰂^y-;ZcCJP-,/i<}_ P1U~ # 76]{8VtIf4a׵D&(<'VZt ސb*x rGTQ߾᳑Ef<&!B8Lr'~w5 pgQmb: RcdigBnMz%Fx3+/N}BA1ٍ6ɘ-=QrP\fYFNw,YmlR/U{N7Ȫj8yqPh3A8]s.HRLY%sƌwƈY9D.yrdaLI tv%p ה @'URjtM5lȵeR%@6G'J-h0T[K9e0 f@RTB㥀bl(  6Ei-Z INkEP^)i;>Z;2 Cj Iu*DK_ R `=\;.a2/jK̻_<̢K4{`4X/jX˞K#۠HlAH#ʙ-t1nQ}};_?G~L`B1Œ!ww 7~7FvJ rk'{aT]J,ռɆ~R(Y[}JB/YM5Qt$͘T@X{ i~\Be7P7lʔHPX հ|\U F"\"$/~7QrfGl1f7W;bC4 n;`t=t0cƴжbU z|cXXP'*N =ZVxO, һ'ޥѲ[uSȚ?H\_MW_4q!= Q{4ݞYyf$䅋h-*tkІvS}R%ňjeʁf# Z0nsU?<3As*0q+`EVNy>loM¿eޣOuRp>9)$*{%[^BoszN(qG#RxGTw8Rhg8@; 4vM.jތs}az-%}É*v0wR /UCVoݽu%BvS uߪlPp%X`[`+s+-{s/C_-IpF Jl+vT{U"Mχg'EZ\vڭ+JzQ#vR~9B\(.%/X ŞGv7v|6жC-hz|􈿦ƿsQ[.ϨVˉ1쐦7/;kMz_Yd .q-@sYp@O,YhY V>h2/!Aj\_Mzfc >U7/6`I|a|Kjzd# erKmI8$6EԞA)ps`! *c> [p \)r1Hz 0uR5nH]Q+ɸiY]z&M%K L=n/ 9Ҋiq$WUd@U:.ԐQKTDk$ *ue85o 493F=ӬZ y$Rf( 2J,dDE&m*4'@ ΤJgRÛn.+g->_6_] ݇(ehq:wی2xɒL"\0*\]]/_}ˌp|w+Bnx6k ǑLKA c4tˌ䵖 O>STNtBDFIɗqY|2HڶIxϦDzRH?O! }zp -I;ƫ5ԎǼ3o{Wc'#a<&I"I{&;O>_RnT \v|VgSXmKf8&'"k<i .# w:(r*6c2cVN߫Ru4gPRF ,He AG'VIRrBD;wTjEW{>Q>1$-68o<8e;&8;niR)m !ĠS8Zr\reFDP]-伥=A`K πמjD 3F@Xd)O 5C8Tɭqwwu4(MByo{s؈VDMW.7DDFqv~f}kF]d?UPr&AD2abtnͱZ 6族֩Ӏw94v n˖ܗUY À 'DSw|Z7By>78ԕ ;5S#P_3fK3@R-m˨VYY%lWC,wݦQ'ś!Ӄaq.RUӹ9Yz {_~ATBh4[j͊ ~+g~~J!7CL`0-197Lrï1 8Ѵӂ0yUCz9u.O" )IF̆g-v_ß4ܐ:@n]juklz=Zn3钰i\zu,9wߝS #Srj&9'Ci$5GY~|v]G8FtQ)|0QxuN/tmTT[SɴAN@gUɧ:g:]sNQLʦ0JERm<e9hbM("CJ~EH6,r J5FKQ6-QNEa>j*UDpn#Ѫ5ݩ4r<Քg;W!NvTqZ$7H,YCdڜwE%zиUSnC3RNKqEh2F +D 8+i$ARN4m{exBK$õZf֠"Ĥ>@+!]K- bD fLG51(/Fr3Cͅ&L0Ym*mP᪸:Z~Q.䵯Fvt% ?j8):{Whi4ng:Ѓf'S'SAP 9hӀە#Q,s0F3of$Ϙp6qfQV3" e\-49%VU*P(eIeY\*Zc({aG]V`޺J0]*Sħf)Zj!5Y\:EJY\PRP.[ AUT~[uY[BDVsT||:դ}n0[劶UnHÆ4`.ZU-Cn~Q(\r# PKIŐHZPԔC kcTi@heSx cܨ%+F%Ɗj$~UX Tٟ ` (Ԑt Am)`ٸ^͑hܲ&-FTG#*A|j:.J愊A77{ C" =6^B+ۃ7QBUsASS^Ch5!\ Kl ݛ.?]}nбFڡ'UCB˪Rz~z.-!hϸwrιpp]ԉ)^"R=bK[xUnxZxT⨦:*d睳MVFš@n^m<=6H0Yϕ+l{s.nsF*J{D ϔv;;]@M4-pӴ mL_9vu˻3IHNuњq1=v ڜ=c5m[EAu|qcY/'{_ !qs2dyI`pih,Cdro5%[-jn <^(buuwWGm"BB. ӡ=L$B ',gxD$fS~ɖL-kB5S*Cj/nMP#86DjXG`"숈9 8қ :8xuX z-0ӢrQs\;M =7۶dّ*{\ g5'ce!f|^Cۏ[ 87~~of>= Aihj2/)TɁ[ )پAįP8rR{)0"{X eWI!*JC4%1YSAP_]O A(q+!Rkg.'ӿ68*HZ~FҞNq](Bg-5S_":`#w)Z)i7rE{UMJB'Y3x,ϴ/b:MR{ta}J R"܎ӹw8ɫDۨ%K*͝$$72?Kٖ`Ǔ7_̖ ^E>P^Df:>fA&3k}DY?g8`'sP[0]\ms}v钬-v@W^+~6Mvgns,_0p@x5O'y%=Cp^U:ޭB)ƺ3Nݚ37 b';ލ!G>wTμ[8{wkBDCl oz77G>wμ[xwkBD)s^]x%]/fz_eMl:G/  B5nj+XD&F ÕcΤsw !b&e@u.ݘ3?ƩM2~~2[koߺD3ٕX* $BPeYX g*s*E4T1W;%WWэ+6']EIVF}_Zv>}͖(X7>.2VK(Lj1hM&,_)[#[ [l.?& cHꫢxf %w<޺r9/l<#Dq1lcWZ].W؄>G1ڼd|mAbfKO8],zCv w]9d/pm%^R͚E g(39q.3@H+ )aJl1/S_A3]h]H.2JKĺc4>ޕ@auad[$fS2ߧG_)ŋrS)tR1H e x.E'\ E^CInpJ%|7ŴZ5Z2Q֒:KAP.Z8]<<- #=(bD 8oyZL9*|)f%H3OR9Ɍ*d JLu ;C Ilўhix2Z&!l)|:9I fJMh AYD"%ez-eID^,)1Y fꄧIs"M2X28+yZ)-4R~Uk.^Z{?o.ٖч)[,?vq]j.(_,xe>qGBk%!7o~K1/w@cxx α?a@?ݢ/U %]:xOLam'+A.q2z^`NDZ]0qURc+@v'?x ʛJmXǎS,qT5HxıQՁ^aS3$՛BXRBy915e-DӁ͕ZPQNwj@%KCMɖv+=x]QöEƋk*P:Z_/xWh!ֺF{UhR2$ ʪsUй %S~ݭAiw-6恚Um!7KVi# X&w~[&`Lz]m c഻.ju7tg*'#4rs*dɂXhV@B,-8"""U(! :+PJy. Nٮ ܃\!yM)?}5[϶OGBGdR t/1uC-+ݩ!zIm86s*j3BLFpf. 
\ j_)i%䊬A~HS[vM \4״0}n (ze5\H"ǯ1IJAtBQz˺,g֕uMEފ*Wd^*STF]v8z Mp::c:0}};hBA׫@% E9LRpiQNڞɭ(ƹluf}AJ2‡Y6e !leUcS>%EIWY"x?+XLL(i*3$gXSd9Cu47Ijs3덬:$_-h@idze[pQKk fr$҇1q""HDʁTN=DLmCbBh=X&XȮR.ޗ~KI#K-H٫kk>%܃vszŠhaƂފiׂ@ryB փJ3h~&T,Q/sJߛue &A:mMb!1Ww>UT1f-Jq'i&{RRIRQ R_BJ,r*pQ mՐ%" 1ΕG;"t726$U/묺s-VJѽ"[!>\E HI*e2RvtdFD=?XqS, ÉHMwT_G\^G*ZH%;d{N&4x( A$Ŵ DQ@X q N! v-2.j_[kܲ =^jNIz)uRk(͎Q<#.ȪjYjٕ&f:>fF,|uے|$7 ~&q$MT'c`*nu r!}ƚMsCr&bSұ;ލB11gn=4Sws-ӻ5a!gnA6v 5Nn\ "v tBQǻѰyޭ 9s )=wAx@ L'[{$!ܝw Άn XșhMIuTRv r{ ndF#~oo9L&bĮ#!7RsO2lˤ#ym&J25]%Q!N-t? PXzN>* RSԡrBpoO܃ {QCs xw1I{NX8| []zj*X8u!kMjV7T"P)s7A> >eAj6/בT"Im0`CK/ ը;# EQEHGw"S"U8A Wї94%>dlDd%ڴ߶WPqqpGAUsx:5X5#!}DфLC"e ʋu&4!^z+pR~?3K'|f_G*9^ pEpGmp^uݖy[PkE[ e@Im:z֧K,+MB{ NkfP}^3cSL=Oc6 tz.mS mL r^MQC S`0G>W+zo? E/Cm h6qjfsSOuULXFL 0'Zepd\&` f~kɌ~y_f]^E7,4HBt,fIVM;]t?hS}u,'S OĈ1炏/aAU ?ZIMvo|V‚flkh2}+juad;LL}Lw>\=څWNz^'e*0D%Lh1)hLPIɥhRE( Mf&-5%'Vr!rMqHn0đqBy"R$Ik-d$W Y!M@Z) iRIS SaLT%IR0lɗBQBS[ϟlhFL O @D"M&oYXaBrV2yN &L($AhIZWk1[k o긚cDE)@Tagw?ciM&#lk((Lx{wGS˿a(&J?V(V}AF$-+b Vg+(᭺)@ "##FFFaFUC/B٬F. `hSe֮p5O}1Q;};ԗVQE)5ZnM',eT}mY0uYV5&$I.2To־ʭL NS5wzOXC VxetuA1\o{)g3N^U"EfV[VhMl.8WqۨQk6ZHaAYÑP3zb5ۇlIV&Ӕu;@jNBSXg:aYR!c$y !#G/%:=o#^1It=0*!/<߀}:3P%=ZqO:K{]ы@%&52Ϗˢ6<(ء/(x!^!ȡ1^)F&TRw'~4S\2%KC{pFrVZ~wmuh3}10*7\R3Rᴐ!??,é:g| ײɭ^\k䮆|舸)wYt=L?_ۛWN Hidrͭ)2xLo~c#<d05cY!8|s +#zmY`L>G/J؍xea~O:9~j*G~^P^v/0Sy4ϋL5T%'˃˻cOo:ok}%Lj٣90J8<gPsf剿{^YZFro*ꀎ'f;ـ4MdmZK|a6ٽPkH5bWUP%vJg?&Ɵ\C#ga?L}@)nM?Og.t8Oޢ! O>Q8%7obswμʍ(W!RCIo}f3k@QA;΄X@ :Fl_h:I@7~~N )l 2zw[.K'xtͿP*Ujw=06A0G-akG/HC4E Am԰&)DǮ!a7 4xSd2\ip@ܚ?eN:|{Q%2M,ŮWvِZ <󨹢!toy1h;*9Y8d4=㓹93PЋm`hCB/ X|d!0Hu=2Ԃs-Pvgጔ`'M%dOEM2E,!\\ B򎷾Jͮ:ݓlv9aqOw,hep+1)(XkZ]tE` `(;UB)i9 @c\aR; ipֈZM5!Y; G^jxY^9ô?i*Zdm4# zz`@Q-qW1d83sn%|]P|kԫՙQL 'H+хkK<@U'QHO01 +['Ŧ-yX9Z+)rk qrc)$xr+.W:pe`%kb%ϘTн'>VDƒ~;`Rip4|3~梞z]ޠJ[;?c/Hd {5MST^&Q_N8UG]ʅxfHQ p!#y5AdP* {m!(xrfDf-aF+33O7@1-9όe %#a";ɒ~2qEX6:qwVW P}.XA\߲o?(]0)nix7x a2%4- OOo'9;]]w1Ҵ/!+;~3u3#hGh誩巓PJ-Egx0+JMq EHND׮(bU-= HPcV8 IKǪjJңθ\9ċBy `PƸ{k )«ODQgDfAzZ2 ˕NW&@6mA0RaD5BO*Q{+eARy1@Mm&8&!A#@ h>u2PjyA(i-]LT-2_GQj TjVpPJ~ CZCəxo8:q{L3<:Q͸cQPX9t }$7NsލEPa$&)gX&|ĕ|̰ x1⯅qbØ_ײ 0zٻ+^+g.p<-3ms>{+el@1xض q&Xx) ƻ<BPY9WV Y&bʴLY2!'QԂ(bQs8|h 1)d;.0Ƃ 8}h*'-$n+^/]۳1Pfc=ƚ8ɽ&u*;f WQ$~'wWȉ9g`M^ Ol_-q_~7ŗNh~T蟅Oaب@ PO_pFZ:<b)-cPъR]L\h݉/ȝ2ɒTZLVc_]Ee,9ӆg±l x = ~FwӑuR1kN|t_%62{t}!"͡Ov{ qECV<5z3Wz%&+>-Aǽ?rY3>C_%ߡSϒwWwG^qu2C+[} ȯwj2N9ʉ;Gۙߍt猬1.ΐ薅48,.[o=0m]sKSfb wp~`󀜇?SpC ,Fځèj NW ` 7Sh'GyįtD+}w AC9PDxjk+KS(b/ŜRs\C3>*Z-Lf㳉aqSG=Twm@7[Y=2\g[)9h/Q]:֪O_=.  Ύ7(64XRR m>ڈrv`}@(͙o*omde6h \WH_^e˛?:UޣK5 ?QM2Hy` g޴} ]sU93K}XSsӁ`рusj0˰NU:Bʑcۑ\ڄz^̓ 9W׈2QNj@n$%~ Ǻ׫Ey36kʒxda/Lۛ|68'?oJLs>rh V~}fj+>hE`SznA٥ewfCWߩ(}Rn&=a1e#*%+gűI 62R|@фZbPKtZ>v_ w[t]ցpm%S[2;6p';J@*ǒBps 'Jfxҙ>4Ɖɽf ..,v)4O3eg>v*nxʼe t7"'gowȎU§W4 Nō8~'%{#f/Z~]Po9~'1.B^|>=Ojh\kswo}|cUEB,kP bLkeZQhO$A ȊI_$yuzm&ULofô 0ȱ2Yz絲&JDH?)ikGV)p1 q86ҺAbr'M?R?st!rkBւZ4mċ.i(P1 5#7.*XkgK|,t<;9eȤs"Ͳ6{Tɟз@DNn%0U3-u ƑqG'AcIO ]CR07$!"Su"Sz)WBR;S)WOXLSe\ Ģ#LnSF"'e!Vmh(J;dg{/ Έ`Kï? 5]l3kQl]^9 %vMfvewaN:Wo>eـq]p;$ȷSY ';{\>uҰ:sY)gTo:ePS;l~ ),= e )T/,^V[.+W]\0/zR H(sӵ{s~} ȊiV7\6W1BwU缫; e;PwpLKtѻɨ"m#oS G*9#o( P[{4l590(a7+aV } 'ܺ,PA'rj}Os3H>ԥ^Vp3lP`im~o 97ٓ3Yj.! %r~_6}wyr0oO7s1/snn^| X#e#T"Zİ7&SI|DQ(WV2"I$)j5|d*!tLH,tVBi2SV)π l絸ftו1F$N[c3N[[alj-gjM0gtGS ?RtзY׼6c9q0R\K=\6z#2q|Hi&;$JAOM, cm*#sU۰+8[2X 'WKraZuy9jz (kE H(a؂$]7.1.ha`lySC\1!u" )L@aH,1WX5JM=sTD(N7ǴDgL}rDX) )J=a|^ymI |g:9*y^\< 7{v0SynEasVR2"Fy/1XPxMM:-(x^eB`m?B)7;s$/zq\/xsUKmF$Ji|캭R Pt% uAp6UO?kCKBgmh<*z|քVYRP&jpdcu+ݗt wǿ\d?iUl]A.|O=ΝVkPE &Wfi-`uZc[%r$3.p'߮Ԉv}b!4*th=J=>"Ob'&'nݺbP:]һr}=t}5Wٻ!SF9#Sul ȋrGeNuLx: LdS3l)삹ࢼST =!SvgyP cM. 
&u9%mS;UQꃁCQQY,ӈןe@ c[N~,Uɹ믿~6Wӂ)yH|370TatA[a/M¯d?y^|&¢4W]|E%6-"Nj7b'ư{Q P#QCm7W 9d~d?Jo07x8A}JȤ<+CYFLXc7!^bgI]^ަ_4}ؼĜ=xf%Bʝ.r:y#de.\dӠUHU>]aPsq֏vq1?"϶|L_bC8_;$aaI3#D6XAșs31"o=⾺#4r–g;㹣>o7vț?Pv6 6!LTۏL ]^ =i9u3%Ȃ@~^ҥ9n?s -GNgGTYB3nе=_uf,!X Jhg1sb5w&PF ƺ0yOgSQ{łzy $Aէe~%th䒊en_.z {͛ԅtOtvk!PZT|Sr|Kg1N/g(ɇ>/7\Gt0)FM0=Wp}'u\`Uǵ3pu\u%\4W*r㽍4cG&L]HܠX]ήoB)iWBano~ -Ϙ.Rƫ?W/پuӉ%۷ڊ 3Q ܭOKń\{ńoI䴼Cń1}7Lńz2 psFzp &Jr>AQ@;|y` jUj:7 BZ42)W4Ȭk+APK|NY-0#UsK۲4ysb5z4XBlS94*ʢa(K] DI 6"LQ0J̊.9086|M'@֥0:(Aň2 Au`iCPRB%,xU0*j"Pp :VrәTaދ}5sƮ$$6g})'b~]%:Gptn4-?￿`.G?]|Aߞ9#tV2'?$^-u4}Jg_'|7U~r';_HWd7ٖlC>!=K] H==f7}qMӘBRv³" z {Oӓi}ӮkvqB͠y&K L]dQF3|2zQ̸bT6> o6-w>Hi@ΈBNɔڢA#L8g<6PL4}uӓn?*=Wx87oZW*ܰGR|d@(?'#1 _; 3,d/GDHLnz %7v۫',Ua\x*AeGUfs䫚;l!_5uɈI &39w}(q"J _z.@ 5ܠQHbp(Gq&vP$` hZyVU[c34O3di=ydh,dXߞ60@fJ3Y>ġ~^zfI]Yj&0!&bꎢpPjꙣ*$B9ʈcKn3&Z,U'vGPV Z"]gX:CҀj,*Sj-+4nqACWO7e\ڙ@5q]*ur$ʀgG"ƅ/#47r&۩,ui?E6WA5xSoޡfY5GHhds{CAi9e]j ^AHW*Θ_t㮓3a~SB~^l3j, `Fmo٩d$L[EA32>*2t3J KMZҫppn? 1p32Zo;b>9%m[)V!Argj}=*̷iqb RHG1(Tb$佈k-,Lì!"/"1a3G`셅qvNJ+ą* w @BG g0c ,ډ%"RT*&5l( JҔsVwږ)AX_7U11 oX_6 cAP881U&sd v `6oWX)h +r+=h;ȱ$&}{H6Ox4RN(Dⴭy0=lKGRn^Iʛ44giKLz8'r;JBFb0 |*{Vk y?F h#y O{3']|iyzAYS*3_R'54N(WE{%JL^m#×evDKJ΀Z2kѮHB$M7-,BǐAMDGRLpMTFShA2|:V7οCGJ8?`lz2A2kgM6G91-q$0".f,q"~M$a6L%̩ґ R9VB)\J bt@h8@LpCDPrwGMt+>63NP s"2Cax!mUwIl y "H[o뙱񽼰ri pX"Ds 'L-f))(!PTjB5he#oQS:]rJ,l+$dэ19|Rq%zBj@:! %` ]s0Vԭ^785ʹ`ӆRBq d!deB:AIЛ\p* z#FO$.#F;=#{سz8mQwoXAۧEڨMT=օgp_/!n.o밐{3ۻ0prvU=Mcv3Ց@|<-a&) L&i[X%ɄzKrqᡶE!@s֐ L7%p@1?]!HLTٜq#i.!B9skfT Hfˇ0![ӻUK aSI,IrHuR~0%%F1FɸTHfAeQQQ)2f!UZkF8^BR&TDAE'U b8R8N2j,dRBEa3. cT$z =MAR"N {t"1*Vkeݵ)Y >ıd;$1 98rL$uډBLKN/8Tr6Bl1Q4%1X(\V b'~sHpP'B3 +|}p! /ּ T"$#+($0eTӱ9ڳ0+ ./j6CCj[={e{TxN^5q&_zVS篊Q6Y_n]|g |pJƔ4e7e!@A 5.y=@#LXnM~Z0‚̧`> fۂ^.v V%:R803JK4p9Qg> |~$_rʬ4l[/M??lʯn.|g~$m"bQTNSS+rfVCh2- J7h GTR$hT@b:E\ɯC` ;V%9 ܕ뫿.no;TSXbm .x]V>/KmCFE_Lp`TXPA^!) ^gI,KUUI cF" v@ČPi50H)7 Fhֽی0~ Uը-t{[7`@28V-`WRY@Xe$ѨWYb= Yđ&Ioa]B-vxNbN \*ť}-qCbӀqqK)H[P~"dCLh Qkr, X9 r*rY™o 0Fj-r$JAQ)8Z"e~6q;I9YMrq?ɟ~x;<3( [uѱG|ʞ_ݡ34(KR_-=H$ǼZ-lTŠRB.PK1kGUuP؛ߞoJ%)};E f3Ka L[*LȀ`YJސ;4tsn CPPpVZJ;K)G(CX{xOLl5j 7q'cV(EX @ՆR4- X ҇dMɿkF|vl~d@chli[lk<[ 9 4E霕bkw^͖$炶;[kPKrȶ%֯Ѷghے ZB$tG(*Y= BTvVWxHNɧ`J߫S&X. C'4B 7WY,0ڝ2*"- >& 9n6GF۴i)Mnۼ9QM{~ƙwöXDV<;ah1 \c!ܗ'oI7տ}P|N[brzvzڒ4!H[}xzt-֋qHϨQ0pm UqhHP@46j'cޏή>K׮{K@Vrыяr5("iPpd>8] '!򔶽kƝ7Z)F8 *9EF5=3ڢ֖oԧy\eKPB>{+{O@>&[N"N1Q<A^baYJK)ci0K@ ?gVɴJFSɴ84Xi&C0M.uUٜͪRZdvc^//;Xn:Ύ,5Jz{2(OQJ3M2|*P(r؂n톑ɟޞh5]n+R(9TCє7'Tր%RWLORiQ7kT8*Rd!Q =Ρ|٫0%5ZxQB5i>H)9V,zrC"qIϙ ^t7Vo/0GݟÍw⛻K[UxTHď.ys$ 9 !SCa\ŝzΑd͏.5{c.!v`W{ږ7Cs҈EcP@~/B%)JNKSZJ'aFXm~6Xymvmdi펑i2Yco}3SrsNW9҃!{ER8H0O.!Op9x {m3 ~=מW(-^IPPC+xqtjOuBVN߼CM r{9 åkϨ^*dZCd ?c YcdrgjvV'v( 5+rK35jTQW]eѺw(4'Vۚ)+V|[TXdOL2^A["Db(H0|YWS.N^$ZB݋ݖ:VGP熣"-}%ǣop)&-/8A0F;|uK1$ $W5饹"=<(jwx\)܋e&T#Z:R>Nr5qKk) e#!Mp!""` VD%$Yq"BmVN-uW:`kPaxRH-cƸThcpF:% -_C]Dcj )' ,UCR*o {"t#aM"|XiDAR&e<(bĄc j PGAB#fgDګkU~/S#ܸU;BqI!U/DZ2: L&Dg$w"ǯ$\@C4U$΂ғWSZGgҧчk)T:ّD fw?MYڡxLG͋g濅C^]mH(*ȩ0FW*JX &N8*dV6Gw9:+@L$^Uy ҃{9wYᅧRgH C^v|LE yp B7qhw/I<YMy"AŨ(#SLd82dHZs {oU3#z3RRؑ br:0!X7܈-0>aKwnܟ) A/嚸l= Pg[Rr'wP|ȹ64~=F?~w2|OWW ?#N?\%}wn`ts퓀dKB2GoLDME\Np)C_l."zĄ ?F^b Jz!8 .8IAYPe9 9 s n|4`Ehfvmfcǿ߸d)Ll/^,ap"ho Wg rT%OZ6N d,y.٠<-BٴЋKbڼᒘ6q.-Bh5 T>cW^q|7Ƒi4kl^oQ8FӭLiEWNJUh/u*#;jKC vgWdz`3}w x`3wo4}o<C^gE'Js$ـGM99R]9H_a;#COE%)5erJsM,J„Y!h r@j=:E-w~%I*Zvbgp(&G h%T=CnAgFRQgIFIeDQf .S20P vd8"ZsR$a˨.ntaRR*'Pv^OLI.V$$@{BT(A#fM$UP^RaTR!`L0V#dp$NkxGha|

[N;p 78oK'V*gU,jF`B2# 3D ΫdYh J~Q&F* 0ނdPe+a̱j iLtH !ԟ fy.6qnBVf*jT -]eܫ{WÃޮ!pi= R?uR龚 > kUSimV-E+%r^;q:EA4vKzW-7,,Ssp|X )v8]ޤ/}7{.f YKJi*/Yk&ӓc)r&35)%v~K^I\Lbrx MqWo4%H (P~5!b:d]:^Y!"yKG1D˥#چr`CT!Sh(a{# yeCV}S<^1#졒zЍw;=*^Vyu/ָhݻ nmUͭIU\ ·ZAs}*堹 ڂT!ik{Wi;2B-JsW[ {Wߖ}+EPyn;898}:CE878M\ԵK4Q8h{|XDŽ"JQ-G:J]#w LoGxQNǏ(LoOw.p;M [XHI+3&͍_527)u..g#xO}Jl4tTҭcq<0 ^Mx}sw1 w'?OOg`*WU BB^UqTUH,@%Oxü4]ܿ=鑳HLAϴ[2PJdlDjճH 2& ~Rkd/9&wǨ=ZvRX=eIɚP?ߍD6%";wwKXA ݝU6/km4Rl z&K|gFlvyqs qVaMeԪ{ys9o›*w//Ynup:'*m"}^9xp3UZQr8CJj V>T~.C@e}PvQe+ƯrK䨫(eټʕQ hǧ8x ئK'3x}be^ #[ƏyXP_NZLwB@DRx./)t==QϟӮ$")$w_^? |qT%B 󯬍\ Ǟ8fH{ʃAx3Bmq+t\v1'f)xCD?k_g!(jث^z՟ !-Sm]-< aB,q׎BPVWTrdN$opœb}_+~?FZw h$l]_{c]_7+z2B%P'ga䊗93,(mW 0!R WR[2- ʵ+NE GLd"-&IU?c,Q&^˦Pj1яEA(Ģ2 ?_)| +*~&Q'3a؆Z8꽕S0k̓y6eGA_HNGR~"TMt::+J yTnHFݼxh[䯽^hF7VL>h'd/1l%k;[a-=c-g! 'sW}%#gɩXtl5Jmj΋:T9}_$^2cZm M W됄Uj&B_bWoKp$>F :~Kg(1a%1*V$Rfmڜ { W08s1ړW ]i%:.MX b3]Eh5FGE6zk24#Qpk 1ڱ&JLQt9%>(Ff)SB%Vj#T.gRa~H8VFXiR\syQ֕ yUj[}EH\̂w)[y> e6qs-bG~xk1Z[dVȇ<~{z3zu'\ADVu t6_OEDX|3BSΕ\.q'w&5(1#z’,?vB=n#IrBŻGD <mx0w!TeevsF׈gwGX"Yb`Q~nQŬȈ3صA+,+ w86j qn@3L lt \oe@t0מpMnkq 괿Sh[HcKg';dB#ڐCr|ڴ݄Z hFDҬ+?VVDBiN Za|Ihwǎ = pDw);囕;ǷVNMDq}1-9dez_LߨiDk_1f㾒3TwIHyLjQؐ=I6†2Ld+ref24L8MȚw`|,__ҍ] CJ""1af=Dԕ"izl$ =NΟdqx-3\j79u ym56w$ r81 Ù<ƾ`8&gHr4 ޭeL6TAޒh#rć{łj?pbíĭ)K MMFk*ӂ/lqsޛeq5N{%vȎbޘc{#=l$\F#2 H cL9:h# 5.r"(D!0 (x`%Gc@{ZkÏLI0)&6,8c3[Ii` h[( njaˊV[^*j̈ S\̶|lj%6o6jOp Sb,2s5r_utM) LYa-ΦV(vΖVw꺳5ټb`;.pZAftUc@F"ˢnLUH٣ӅtV eFb{`e]膁D@".窝993`2Ld\ ;,7p7^X_RK#FcN8LBxmx@m"\ ܢB-ȍ K62[aV)< H+œaھU>Lۗ|r5At0)r%%-#9D HCC_P2+ϊPh(9@\[X^!`LE_ᰦY,@m?4D#nL'-l^, HЂjVcQy[RnhS gԊL >פSh.W91SnCb0!d` y$F>6BpOa,xÍ^ BUh֢(3PQBLϙ9:1׬2hҊ6+c}* k$He@#W'h x%RĽBŷ ațh_ ;%jঅDB :Ky+k/e=RiSVp>,,ɾ|yeq7uiZo5uo"Hى?&Ogȶb$In?(Ez,wy\du1(r27lk(nU%)oKz?IDx/aHT'P-f~1T=mľiܝ]|٤U,*g؏}}Y7]8S4v~x@n 0Y'RCZ%mі)q~N`ī.bE,uбXNyhH-8 'JG]^>1_HrQEjý/ #.m꧋__/ŕE~e.#]Vz+M̞P"HG EY-b 䂋8Kc1C0*,/ j'?zG1 ~n"\GW1<Z|ˇ+Mlu0eXۈ@ܯg?^*B?Ent3o^ށ% 9oiR/mj&>, 2,H|;>CԽdOid`u>黫Iiy%I<}}Ord!:i3s;lWjuG8qN EoX U 6)#FjYG c٬1i|gl  +FZ8."1kB)!*Fm,ZRFRLC%{p͐/9h) @d΁Yre ?HrȤC %YGme n\{ Ϯn"ііQFT/䛁i?0:O-X(/7ڃ`fS&Tqڔ9kUo mHDA|=ҳ҇4Y`-h2ӓR(B51,1֎n]~v Q+3K,E5-V' Yq[ ׇ͍aCm>ap9f:eABdFa-3fz2,u&mc6뼼fJVb^䲌|+_8vpkj7ǹRu"Mq{f9ڝiL3yHkzCjƈf#Uohl|f+ᩞ&-'*vO~[#_vrj`.vBrx/X锢`PADX͐[#JdMBoܾ| 2o?6}p[Ns5< Gn5Xq1u\V-; : PYNM$n2Sj,M 3Nh_WBȲef=Q R<. to3ˠ{\Yyuf[^1cQg[@!+4! ɓRgo_1& V: K'bQ*È2v9eJIOTƼ[x={NHb-JUә*U$xĀn7º:HFj$0\yݑLbT`N=U|弱I1Ap|Ydd-3NX0KfdMzA0E1[\Ĉ۳B ӑiǣ̘b To vrFȝL MzxڗJb T2#rȔɡkiOx~Ld{wν@f Րc\O\-Ҩ%{QvkсJ6iA4pP+r{.6p N(>*`g]ZkxY1tG%hW F(dVsyf͘R Y;I*4LyNaYj)eIwI:Juf?s+YN6RCw`B^rPuL{2_}*DtVn#e?\zb]tuGwVrʖi 2G˓t<.ugPD33A!@cp*o`Y<˧~T{Is̳Y"'"rr̿l) ,@kh鹴_H!N1wFBwÃ)oZMjYs4WFs|ӜBhsovϵ?g~5Cq9WMPFd 򢔙օP` Zfd΋*$6ᄔRd+qb]wuq8URڭ $;U{o-[eIhᅠ$˲z|xtbnfY[IJ$/Ii_gXJ*MN;ܜ &yZP/;/J'_[Zx %ej!16HP uZ(M|Dlj>q}j!>GT*ϤeVPW!|]xk~1ܧ9S L;_D$<3 ;ɫiP:𞸷{݂ y1#0&E<0ށOA̍ gHFnBR5C&cΜS c_ۈ9cB91r`uvOװ1ߠ܇u)ޯP<۠4} X6f|2M'f+@+ѣ.EHeQ1a^}\ojq)bcEǹ]03܌PT[JMKB 0ls0Otn.!uy}RǍQoI@X|)a!β#^ؑk~ wޡ ƹ:cjŠX 9xGٴ${-4sT# p󏽞ݶ_W6] EU:xњSY0?rebNopg=Ԑ#QuW)EoF1BJM 0rA;:r2;0#wYKt.XCx*c^}.^Ro)I.jA|YxvJ峓tA`y >NNZ0S*Y! 7grxrzZΗ.׉+)p{bl{M|&z-P}RFXC<`4ɞ!kӜxGoGb93YZNWW$䘐fD7f^sNfYغ߬"'qaFI!c;VWaע~ĥ`ڵޘpwƉ5-!פ1Wʻ;,d*aO @1`A&`ijpdLչ>fRU%74 irp`GDb3o9w3gFD>Ɩ؍G {]o=Y =Áon bW#K*y_Uo쯚. 
$$/x.օQ߼{+7-qrDfsgE[EL^龴p()&]t-Z,&Z4Qr21}0FJDp{I< Bg`h!S^RjJ!OB߲2CT=فs!4^BbXpb#&#=XB!UEpd=$F< 0[ LOTh:kJԠ̡g9&ւXBhτUPd<=:۽35@&Oŧ554 c֑{l5)]AS("S ޡ߅/S -lQC*y"j4_czd-7Jk湌x7HEAF7!DЬ1PÒ[{NJ a]sp*bk&OԌA0OɌq@Zy1X8Q=g3KM0Q~ h&?e,M̨ӜѲhzM1j3BYKֻ0خqNugP(0Ge_Kp>n!Gw^ecmr7KdW-8 pT'̰q]½s%g 0h"X8#sj XDg7:z15>wqr ݉0߾qԘtv>4~t>Noen_)}|Wg՞'??~ǿpg^%Y_/]]* 8k@`ؑCg;p`*W>ϟH{6ʪڷOGߞ46~X9 * j [7f~"z&c%zy{區PqUwBblh=z|\t:u/ 4IJ-8`Uޝ|3{*Y[d\/$P*(QO.yȹ+ O㠆49>x_^\aF̵b!Bc:%r}eZGjq, Ey֣肟nж4lg/,{xod)Ockqi0% %;h_6h(V1g Q=2=6.-%?o#ҐB ؈JakFX8]'q,Sj1Ilso`%Q_q7ߟwJ#qSm%Ԛ{h%r#dYa&5c-QK:W -枒j芧;{ӬL, c&ؕ:3u(M(cD*7ăzNysjӋm)q5nD8hl%R_W/ ɰ1yD]8^bTh:~Ll%Q$(嬚]Z1d9Z;չq>!6AQ#ɵ 9+o KƢB&0iaS/MC:lQpeA%CvۓFf@AQV%Mӳ;I0^i_%&:*' v~@e uƭa1KH;s7xK2c-2V}1 B{rlhhCtwd^ACC,t×l\7Gkk)ι@jȘG@2jRvDٞr}piqK'woN_^gn]=:#2(fkfݧGG_7~u삋4(KX75瑗s;?jަY8! P*h'p󪕍U֪K~6\jWcHl'kf6ȁiE~ZtZM~li Ot~ft=ɝ䲼RfYߧp!aeI i#ea$ >-L|?KB%%z¥ŒΛɝ,Ubb*fDx7ŒTGY{m %T#<R|lxiaFpX^< II*Mg1?1iaB"êIsEтfŸPY54畡)=bb"էZ T@sEKC2-٢*# ڣ @y1M%5<=ZyOn6.l SaݛG P}Zy3վyQceR#BF'}ZlszokMD'x6%JgVͿs+͍gu])lR1̏%Ju"vi̛P4Δ14&l7|~vDC9=oa8=I|3x߮$yˀ<Քh8ɺ tG#GUΰVK QpyV>X'BYޠCfB%+̃6$s6@nֶzy5X_G+ od۽ lUY>]"g~UE7agTs2Nq6^uou,(nh$_ .v`*9jE%ahNI iYF/?ers )u%cCH*r&Bx4ЪX*CsgNaoT5gmhtV1mvJlN:edYY=6kRO"X; ^*tnz`wP9D룎ر $zp \JpF?N4c^$eџzpC߱&Mj~T~XU ,ZGuISOn+1]LfYjʼI0tXI%dX*rbT?)+B<˵?"V;(僓[I mđf]N{d(99^TyS7~*~-;q?-Rs{f<|j~'JMٲXO>,jhى'YwV8d2yۂje\/nϣvj4~nZ-w$ut6kvVOӜ-|4FM}O/خ ^zMôشC4CgP {Eݚݍxы~k'%?y}|}}txG[ͣU8=n߶k^藗GWG/}M׃CӷF/fhZEs4W4hGxT=GU;zۣ4b;[p,9+ίE_٤FUkN/ {Gq6jOiO/w=y6 ϶jx:s2dT|:Ŧd7;DW~R^~HE=@xy <?tرk|2h|]:yy`6<0ffY.C6L2Y ]NdkiuY >ss,(cSYFW  S 2thD[ e6%ێ˸Ǹ^1̊k= 9))/Lz<%GKHkRZArN>dU Jᰄ.Pl2t ƻz](u (%6ǯ~Q:kuG.2f)!w~<qt8817q2Oq]ku@M^:A^ qS\! ,&KS˲𑜶CEAJ('Tꛄp&ǒA-Z3]8#(iR%/FU7Gf>~ӱ(@kv6mWu=:y>҄v2V0P{eXԁyOli[TQԅ)AĪKnmOܖ>㶖^[ kt.+wDc߹S?$[`r+ :]Z^[wzBAK~[LT^ ;rIamf9'z+m&:kE9*chY @`B铗Dk5\UKoހ%W]nǟ1kvDWun7[ԙmҴUi=V2^Wͱ5Q,5zRޒEN8E[#,HiaeQ1X? kA IѱyiKcFF|{&8yyR|4*0vD y}6O+qH`kk dž=!)nlY}#yu(vU6EdyD@Kp%J'ᥣ9,,sm0U(/ aSpL(7_ r/ԴN= *P >6Tq}h|y$G7]G,`bu\Aw9@9Eh{\&RW:6R.6Iq_O-&f2 L4'/0F",zuF |}9$?Zϝ3&fq Pw)UtJ1~\^?{ʱnz$}Mnlf駻{~ ,GU4cRo7YsiR9o yq0Rfak pnwxmKl1nj$l0=MOcyN-BzV_b-aTw& McU!5`6i%O< WѨLbWѬ֥ڠ*u԰mܜ~ܳ!F9|.Fy~dZ [Ea3dhz´S5(~\j[l5YW˥.O'337o/:~U0вa\/z[0QJ~+Xy+6^ӊ ܊{׭D%|fUUYXUVVq,8h1]|uYoKt] a5:ywF4NqtJ]ʔ%p8^~Ui\38Z<0MWHTf h(^x=\!XرǮ3k"CsKz8Ka&bJ ;|L]<}(e^K@sڤu!nu;flOT[S=B cƝ39%O?~-/qe1ƨ!~0u_njBxLα+'s_ec`8Qb¥^pQhVp8˂pc UD{Ņb\zWpP]Np^~GW$-ືi{EGYۂ+}beu2z9N! ~'P[LއuX{ SGWzJP2uz0 so|$ zђbm]J ICd0K!t7Qqkۦ*E:s~qe_[,_T-f6?R֕ AD4 PKBObr?c&kAjqE =| :asެ2~|Պ' ى$w#j᷉N2aNK?WRt>[k.䌷eLV4? >ᅧG%ba[odCskG:O:ys{*CU ԕw$\ķW( z6+8\//+&UJ(0"dN;葦#srR+޼:e"h:SMgEtM63:XsLcn' 09hf!>΃/ýp_mx5׭#/v]`VN-TAL*#xu&8鹉1MG1>YT,0/Z1tQjF)`Rx0+"+2҅z) u<@r 0Edf(*ÉgA@p|O'5Ohxso{ֳUJ\pSTJ:Rǁ ,yϗg:+̤$Xk[- 퇃 F32k&4jX6DmS r50A DJ6s=Xq07(P J!) D!0&Ha`wppp" ,3(At>"~V:4`$0ә" Q.+G@mvAfBA\HĉdGAZ\Y(=B[|*6eha*ehQ*6h,*eh1VPY6ؗ(@K"*vvѧ;$}p!\%sJJ+J_$$i$^q}nz,F}n>]# X!, 8vK뭼tyK.. ?Yhw1j%LՃgv1c.9_f,Oe& ̛.*";ªSx7NYs-1|ʻHug.o88f0|!a=82s6iJd0=|XZ7,+l^PV7Q G"]ZǸwo~zu!c֕zÂia6lxRJn"> sZw/iШLÌ 6Q\ >M|N5l!b[…hIm(0m)349= P*+%fhmZ Bkh# {PxkymC0 f1rm3 \x2ڱƥ CjTP@ڜlQ ]ÈA5R"t!v49= ^/!>h890Hh)(Px@I. 
.H 6z`Wz]޲+8륓)ثX\a??Hg]o_=rLd`K{t>,3_2{lz-˩B>dW>=PGRO[Q"uu-8Ao Q))=~^Le7o^M3 }ɷ_9kf`X2I6@Y 5j-9(iǬX0kz/:zdT9{]-U|6Oʳt~g?7wÝquVI ɵa :Od ($ipk\ 0O$ZY^4iƨinR6yɡmhu6qMrP9dF:Y x6bȒ1,ܜ /}#\ܩ|9jc1kLj4rjrM]!P7V43pLT muko`k^ f3Ysk*f~c]IDݭ%hD!Y%ܪ 9^U?mhmeڂ{f} cJl\deU(5{vU,zR  ~yRrYB1ƈe#،inGm9iace<6_ڑ1_/c0rz|qꭻ}/V 9omV3q"E_Azx}r/uzqoÛhpZnY{kZ۹L[3#`aV?(mIGˠo-a]`4yE @B/..~>8Ͽx .SfDQ 4WōnQt0ǝ{t+۲̋.zJL@{^o~OQhM;(h*lHTTkCj<noϏ.9:?:;9x{d젰M4CIejrvGW6Ubx2j:(Ytyr>8<:gc'A qa;?ytn7z~}8DGs|qy^ :I*+?\ߖa'2&.:>h[eQƏ aO'JN5%@.knfΝA6@vwOsRA]oxZ:_q[>č3$N U> E}}AՓ%W^]>?NOwIiw.kS;Pn7[$Ϙ}{tA9NorPKF'Oun[rӽʙUqndY=.;uGzvu)ۍ85b/䷽;޼n~ք}8ЄTVtK 7yѢ6hzFz8BCu8= 9k$NwYA(9LR:n7iG6Wj7(5~/f/t\sE_'&f@OՌʩ'ZFsfԢS5{@hm7qB'RˮTAme#!'X}H3FqNCzL% 3 bY`MK 75VdLjвI8,`WC& !B%`YV,[eʢՃ$[A$, k]\=Nfؑ޿h(vUR -iV;m`\/U`n~˗ ~ ~ïEɁ" \ { ctLZ> BE E5=+^+赂^##+#WkVȈnDMdjzΌ \Fl/>#v5rS ] #6ZV`ؾ8Wʈ$iH6j5zVl3b P_/9P6 Ltm7(-JHpF,FXz-^j3N5Xjlo>!2;(#n*fhŕ9ȕ+c& I r^=^]]M!Q';[A>ˢXw1KҿHZz3*$Q$Cac$+H ^[#pDpD^cKk F ]]{ i_jbR*1?1B*Q  FGD >CiUaӡVRCzh 4CxL&W\E "lpmf[/ ${.͋! 19b(̣pfPmvlh ny IN``@)"!S v&L*M+{L+I71ɚzyT|<}*sFeBE鳠P0 .xq"ch4j8f1tG (!+LV#!EL?w^0t]/9fS0Sx < TpzüNddMz j&Qm^>vЈ%0`a 5$Kp>tGw0PUHO:LZ\. 3lYX~|,ʘosi2Ԥ^gbA6ύĝǫV )jJgFy EAȌ+|!hOa j2 CGlpD4pt#LE, H&}Xf2ͦpue므&J%W Hk0ɚe5"TNSN͌+ 7)g[u3C|ܴ:Qpbp ]ς?Agk|R"5 8X'mеZ/Ky%20r9<=:}+bZS988KamHӎyjrf4ZC i ٴju\?nuTk &y{8|7\105uSUgjk=_`IDm4q,SkW0̝CGe䜣 ؚ<#%y` {yQ9re,gņ@VuRoU KOrQmAƝ^cǬ _Ŗ<} L )(f i:uqantڿxi>q(&V ȴ$F"l/+B"jK?4]ڱLQK%3wtMՑmĴ9$Iz-A/ߢ߱ˬ]3|kzs]˦|:zTyTf j<˘5Z޵NYStC0.0L  xw eV'0]fikYxW5A=R=41iި=)ף&ߴ%98KIxm5t=׉Fڳvͥ٢ԁZ WĜ.]̙RP- T>D\EȠPЖ716HQ7Lً,8Z&\aYKN}K\R.8ȷ]sI_M8H ^BjZ=&5G MpDܞFsoB"s}Tȣ9dX7(Ey %0ZO m$k~j.(BOCgq$Ա$gtJy,,'ֱTwlF]5R$TJJ,sae.[LyitbK %kOJ,"񌜅9|uL3 8R`pvDnqA= !ȃ!'2 =1?^i%ڗ\ANgx0Hge!Ddb1y[d*oY7 `i6ǥg;`wuVdg[&&ԚbP/ܱ1`*IȘ CL"AE aRʈ dD8THB"Ǒr_H캑 RƠԲYTb:ڼ1q"DC,=1BXn}4 ~A\2t|`I}b>UL.q2t̀*5craysNĹ5ѳj,UˁIA{FUw x{"?ݘ *|~l;a.# XĉCeQ0[Q`w˩r*"I]#QyBΤKL]Mfek;=Vx,Z'M 0˾:u`ߣQqcw&G8NnPWJ_;;QKg3$.r7b4-Nm~X+zÃepiNjZm/}5xcT{c vVʅddzL3/8X:h2HZƜ5qDR_ƱL8K1ꠙߊ6h]UF*D#O5 W(I7v] 3ƒVjH(#0ULqee\k`i#ڷ)\ 4/ Nq sm%I(rDQ . & WZZ9}Vkʠ{k%? 
##I*$^A"{,VX#{q`Ni + m%u;<%]g-EF(AE#A _B9.v  Z6F5!P&: >rpy8$00 :apzF"ǹJ`Ni"lHQ"f;= cC\rF[ZbaڋsAHSR7>|O` e\Ϭ%c#\vZW4KJdoI$穖p;&MxR̮;ˡs io&?m0 0/:B`%8 7O$]&nYl^BuJ^ٺ7Cۻls`8uv Q˓ʐ.7tL Z HsYˑx76\p`:Wi1Ja3"eȼc1!V.\ Q0aOn|z~r_mڪpcFKCGLk kN kT( ǯy"H߿e)Y8{l HO<|FF&qXB*V5@jZQ&eZ$u3P傋Ӣi >A9@9?PR$?šT!VD?&Ry*% Bz Efn;5,^psسCg.:$?=_9,6`(S"5=\l;_8_~]륐j D3[ї?/p*a{tamKj,C+^#/8Qqĉ`[H*}O`NVuK=mzŹWeWBoCTH(!-*nh?T>VplxsÇQ@L9|58q:T8w8M ?.{?}O;d&oٻ攢mjSs[3Em FOoaԍΖN'3km飈DI`4u*WFLpF-s,\R2o?=PN*Qo1op4˭cyC %T7wAsFھЅTحLfH@ܥ>+_ANV j:\9 5 $)㉦V#6g,-е1Ff  tK@gJE BȈ-i!(Y\ h{a}-/// +S+Ύzq$vUJ Ϣ?RB$9J, dXDGrةӾDSoϦT!Ikߝ}+v O\$IX)3"I;`erH}F rZ 0KQ ]Fqǟ:$׬Zzg:A72,dے6>ɦ}37W_3M*iҧ&$]0} %WW1kpT5==y˺~^p,%Xmtz\B}/.֨JgSK<69gidf?7l=7Gji\,кDF0L咙 ]ASţ:̸`F$ʷ ϭxZ9ʊ|ɨhRIbJ7xE PgJ8ߜeOTGN"LfW~:[7d _e=Ju|?\hۿ;D{9?ɮ1`S%fB]{;C><C3JFLgq`Wl,B'%S& 6,E_vSř3VZ`FqnB#?.VJB]vE߽@k UeHNۆwPxN z;  ^/fGNδ'9SFjq*mx>}tswj,b^<*嚆A П,)Sw睓j(zIry7vSI}R-F_\Q)>>Pa_8  w4ZxԄ9(D1r WpjwIJ.J נ[mB5,5c,QN:8s1$:R*5U), h /V}J)90#-RԤZ`IR8NrNI0"5 /y3C|?8Ce/δ7Y%mRh> 5bZx@EgU 6D9ۘw ` NX}}@~mRp`TS͑wl1" Kpo5YjyP.6V~Bbwrcvc۶/6/nfr1F>peRƺ0\}j6v3=|w |O5w#(܀v6.;q>yr6m{@__eY>er;mҍYπo)\co_ͻ7ߜxf&nCv9HĽ6k=C3n;]Q8g`|=\?z>HzYz$Il:N@8=LK`ڴwοɮ8뜽8}sxӳΓN's S0>3ѣliĻ0~Zr6\lkٛgg/>{s \}&tX._,D] sI+:sz?|ű:3;1l4YH6C_V$JjIn8Fj|UVef/O^<] #~>><^u%]sN;mOvh[cvbO>|zqnZ39=^g+]X?z?=Qw_yb3fk1Zbi]XT•UOʦibrM<hdo^zͮ.,HveU F b~ &{\W@#;;mJct.Y`zV*[dcO)TVxT$9°ONwQdX('t)K*sCPO\v|:~6-G`}Zyys;H4pϰ #VH9e8FJ+]TrJ 5ac'>2+Sw\W`%b8xP4QJ>&AeYUV`1(bw Y- sM$Ȓn"%l>_\_|탫]a9X3#-`P Q?1͹ ,8p],yϹZFD "zkM}V D-SrL?jo'XHT9՗bIRxpK$ jQc**Vh95)!"dj1|={…o}q|s*h:n(ySʶLNqwA ?5 <53Фzl!K䦠kxֈ)*}~.oXGxgdzˇ\!Fdp]z:N,D ł>Ɛ缣y^ GdXhP;ާOv^G5VjgKv"|>Y }xu"U-\mU3-jpT!|u0hx?'+tc;xUGgJ%x2ja?=9#yٻ8_/aoS觮/KuPkyB+0 ݑp$eB¬m&&v~uZ9LHո$q&&p 8q0n LNN=lVw.qQtmlVw?[(0n\6E;E6(*ex!0 8Jms7{6mF=&p5)*%2=%S f82ZKb(YAJh = dH7`M]Ke}MdL4 ho0i(%p4,ʝ⅋!Ь\\E+I7`@ <3z鸥:Jw h_C ly*|CLhp0peM 9`gl t:J)xd ja"PLgX>rxG`~%4&A;9ې>wmQyo?\m2Qh aQЂT[! 
{:bΙ7/;?e=ܫ8=NN*W;mNɠ~hnlήVh35&rmF C]9`-5 Om0k2:Tҁ$ 4"|$/p]mKeL=(.֑ RRFeĄ@V 6P9A(4Jʽ|ӏ6Ne'Hs)`]B;%( 6!`Dt* Z6`";C5a#-xV ׸v9w$#:_d[,Ub(BPB/xPtBzjð>`b _=t΢%-V!x+A_*h"JPu8p[m@ZX܂N@+$AKJ:h/ըҧ, (@-,#A6եZ(8Ac̽SbtJrJ<jmj B=8h4PY="!t&{ʃyKA\L}0!P~={ъ&vi#>sbAϜw">F|D97y M;GQł>Ɛ缣y^K53Y16n# oOENN?;ôzezkV He殽Ma;9> id,罽H-.#V,0[!(x؞XpZ|La/-ƛxFvi.̂"ǽ9ܛz^VHc,ۇtb1miv~l+dp>Zr{j M\qYwhep3iiz{gu+/XumRBp:}1oE Ll/M\;]]kwլ qMV箮F-X\mqۨ7fǞ_e ӂ>Z|vBn>^WߤLlg9׷*-D 81Ewiw"A;6Nv wɖR&oo5͍yJn2r׵p chI)rlZP r@ }z :k iɭ.*8o*8Nɶ*Lkmnoy]9|(q]{+ڝV&8эӺ[#bnk,LJQih77 fz+f/,QaH@n/ {kƤ+tæ4=Ky4<=%IP,;" `:I8qxϤd!<˱!fMbr5~-f#JDwVE=M>`Ӓ.qvw"κVtLƭSĚgpFW-vNoZz>bi&(НHh $iA"Yx,VXUٛ %:-)DD(&*nDXdրaI`ŏsnW88@4 )x/o0Tm yf z*vfw7'dFh'y҃Eul4 $sHrs/-}28JGi  @"9.TaHJ5>lIJ/*B6cI:ܰjANbo"S=0Ɗtgj r`>ב6ΫvT֊L)3xڔR7P$:j猁 La`fkv-;iPh6:e!49H&*7_IQ޹?_`S#;›MѪ#V(0EׯMѪaE,61AP:DZJpS",S3B#+knFqp=caс!-jjMTH uU&) 2H$+GLH9Am3p,A2ByZXl y/vgaNϰ YNhrЇsTTx'4=r`ͺɂm׺a CBMDp‰YNPD@5noY[ փKf_G Jv$%,hF4c*bNOJoF*m FboFG값%0k^S׳"Ut[_.[?,Wqu~v&k _7Nr3[jW7v3w?=u갘H XHD}]kPcQ,աa`]LqqX صn::\[U[6#::l]aA HMI%j8)Va9(j_;&vQot^OSTTw-AIYUJ*ojXe]h6nr7ᆱI,,tLA1)aN@1QWVG6tc¤% :_M+>Nc|XmĢw6X)jIWzu*H"LD(19GL^4c1p5qEs#1|OY!78yӔgkYog:Å 6ls1lVL)\b",l5+8U2r*ȑܧsk>/p3' )>f %VͲNs ǡ!YiJ}G [[rݕoWs;kq+H珽:EϠY# _ZH_o/'b?mI8uG-5D{˶הܶm~9f ج&n>"jF8s80sH ?|v}'~# )3M?= /yF|3{7con4}$m5 U:u{H`gaL]/6'>`ˆ?YU@LŭmDzX@'f`%P~$,,ve=^ڢj~==pLݬݯM>#ZN-]ߚl"[}=G/I;,g{st=_P*LaHWWOw?X~ KIh;:W.n~Jeyl[Y1[+˔O6c&Xp~C昿~׿4L8zJ1؃+]p]T,'ϒ>'$qfn>@ GkB -kXЮ1'Țց1G%d8UXHtkO13Dl/6znw `VcUn3(zPgXYy|HCVacYXm+.g ~y@>s YN:6L#PoJ`$b6= aut)e)8`!4 *PǼDjl9l/vgԚm(_ 8 $/^hM]qDu{$'#u1z뻫 3nRby\6vSDs`qbPHN_1hf(}TjiЉZ%ƣ8cѯX :%:h֡Ijާ M*W8L)xxǤ!W4pIz~H)Ú f璌8JW;Uil1QYF_,ɘA=fOZΧKMfI8NiЄj'B|6$S)`/j=< L[W=I׃;*#'GrC~yMpOcF&xz2SkZׁO;&$ӫVc{VW{ҙAe vO5 +a ':N$n Y!wMBGX5m2 bScznVQOn=|WJSڿ|Gä3Ɉ bc\sĤ~57HDAZgs>ԫ3T?!}afNSxJ}h4#TB:CSj:)oz _RaI ~ۺR<>),;Eӫl[_LJ1EʟMcRr1쾴.-SK):041 !abJ$<:Ig J/F!ӀcmieBH0Jỳ`چBJa BRQΙZndi.e&r4¦aƛ gZ7ogZP'yRn]ʓ ܴ}.p[%c(w&0s ZUk")/P$ttJHji'B|-E>v=<#aR@6/J&#i)0A5fStN%jLI>ɤRXrI=XZY{%5>EW7mrDYK?\F#Yyt'l<q!q%t .G>h"3(yZy(C5 Hz RMȚ"tX ?o"`΄zc!švONPbAUESwL!ףgSdcԄE' 797yOńO?] $V>0?A壟VzV#fu<߫fmř[6XzKvS8lG2=2sV;= 6d+cTH u6Tٚ h٢be⍳Fn1@dY1r%seGhY`蜿^vLx)R㑎1*T֩+^\w\PYbh֦H4B^$RV k4B"mlé!n+SB0r>eØz&Z'aLIUS^uFK) Q(b`|r{dk7}'8Uh1`쉆^z>IS.Dq>\Bm}pD5$jɔ $%"kPWa0vZZkQ{eQk9H0jt`T2u+ҡnPk 94԰cd0 N<C#mHkU0N$$ 㧡&STf7~yu?X[SC j!8yE"2<(FO~&h3pRRQ j]5&Ok'$!NQP(ͦmK9qZX#DQ9T}N@>IClZ2Si\B)" !Ng$!}O0Fx|M%^Bz_H҅IR{_!2L>9b2z{=Wv(pD`0VScXZs58k m D4vgƄ4Im{l0cR۔}LI%H&%HJI'B`*gSa=lT|Q=y"ܰkfq5V`]@kc P!XQ5yrgԊ6jw0}'4V]&ٛj׺?>T@Qftm{a@Qΐ,UTm6:r9jETS{gc(!A"R M95Puà:-ȬF;1tVeY0Җ @%/tydBQKf(02C>+&lJY@~ԫVG[Mچ{==NDa26A't}d/w.Y>}%2+ofw}Y{LjΤ4t*kZiOU!& A&x:_Qai)cɧ0{ 沆U@7 ;B4{%Z8[)bX%a!#/O>W/ׅ4Ԏn?d(1؜ flƓT"q&RlZ^IPO>P<7ɑ&)-OTK@ST9%SÑ u'ymqcñA BEHEaHq܃-v1.Ypj 2wŇf͇ժ#*8݁NkZR<^hEE@5s,ykzZfڜY^psrz _R420귇fW8fJ~ln= l"o0|W9Lbϑ4ݕ `O XE_~؍#`sr]\u|Ce)mGQZ*e$/N {`yv9G8?/nx}kVhǂ? }y z}?}xo7R.Ϟ>y@q!>?|o6{3˱yw\&y}7>XgXą7"cϝ^Z%*`5ptDŠ܆`R3GIЄuj=dwa6a<;e < W  3DJN3l`V>昗1Bs{I٫#}DɌU[_f`)I%e mL#XZJnڠeP$!4qƜF^,)X%'9!j8^pnV&3+4aeq/r^EJ:';Op!&〓J_Z2{io&1}VL6\ЪOb3Mj9SIh2` ɍY o[wRSvnM&vUOaGn:6ɪ] 2\A]̰/yHl4 $0y|@Je4sjih4ΚBߐDBWkMT9v?߷pS !2GZ 6"'M-js`DL{"ɎX;1y#@ 7v. 
[binary gzip payload omitted: compressed archive data, not representable as text]
var/home/core/zuul-output/logs/kubelet.log
Jan 21 15:24:17 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 21 15:24:17 crc restorecon[4761]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 
15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc 
restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 21 15:24:17 crc restorecon[4761]: 
the following paths were not reset as customized by admin; each entry below was logged as "Jan 21 15:24:17 crc restorecon[4761]: <path> not reset as customized by admin to system_u:object_r:container_file_t:s0:<categories>" and is listed here by pod directory, with its MCS category pair after the path (or once in the pod header when the same pair applies to every entry for that pod):
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28:
  volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 [c84,c419]
  volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt [c84,c419]
  volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt [c84,c419]
  volumes/kubernetes.io~configmap/etcd-service-ca [c84,c419]
  volumes/kubernetes.io~configmap/etcd-service-ca/..data [c84,c419]
  volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 [c84,c419]
  volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt [c84,c419]
  volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt [c84,c419]
  volumes/kubernetes.io~configmap/config [c84,c419]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 [c84,c419]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml [c84,c419]
  volumes/kubernetes.io~configmap/config/..data [c84,c419]
  volumes/kubernetes.io~configmap/config/config.yaml [c84,c419]
  etc-hosts [c84,c419]
  containers/etcd-operator/c6c0f2e7 [c263,c871]
  containers/etcd-operator/399edc97 [c263,c871]
  containers/etcd-operator/8049f7cc [c263,c871]
  containers/etcd-operator/0cec5484 [c263,c871]
  containers/etcd-operator/312446d0 [c406,c828]
  containers/etcd-operator/8e56a35d [c84,c419]
/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5:
  volumes/kubernetes.io~configmap/config [c108,c511]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 [c108,c511]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml [c108,c511]
  volumes/kubernetes.io~configmap/config/..data [c108,c511]
  volumes/kubernetes.io~configmap/config/config.yaml [c108,c511]
  etc-hosts [c108,c511]
  containers/kube-controller-manager-operator/2d30ddb9 [c380,c909]
  containers/kube-controller-manager-operator/eca8053d [c380,c909]
  containers/kube-controller-manager-operator/c3a25c9a [c168,c522]
  containers/kube-controller-manager-operator/b9609c22 [c108,c511]
/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9:
  etc-hosts [c968,c969]
  containers/dns-operator/e8b0eca9 [c106,c418]
  containers/dns-operator/b36a9c3f [c529,c711]
  containers/dns-operator/38af7b07 [c968,c969]
  containers/kube-rbac-proxy/ae821620 [c106,c418]
  containers/kube-rbac-proxy/baa23338 [c529,c711]
  containers/kube-rbac-proxy/2c534809 [c968,c969]
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f:
  volumes/kubernetes.io~configmap/config [c661,c999]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 [c661,c999]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml [c661,c999]
  volumes/kubernetes.io~configmap/config/..data [c661,c999]
  volumes/kubernetes.io~configmap/config/config.yaml [c661,c999]
  etc-hosts [c661,c999]
  containers/kube-scheduler-operator-container/59b29eae [c338,c381]
  containers/kube-scheduler-operator-container/c91a8e4f [c338,c381]
  containers/kube-scheduler-operator-container/4d87494a [c442,c857]
  containers/kube-scheduler-operator-container/1e33ca63 [c661,c999]
/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e [all c12,c18]:
  etc-hosts
  containers/kube-rbac-proxy/8dea7be2
  containers/kube-rbac-proxy/d0b04a99
  containers/kube-rbac-proxy/d84f01e7
  containers/package-server-manager/4109059b
  containers/package-server-manager/a7258a3e
  containers/package-server-manager/05bdf2b6
/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2 [all c0,c15]:
  etc-hosts
  containers/control-plane-machine-set-operator/f3261b51
  containers/control-plane-machine-set-operator/315d045e
  containers/control-plane-machine-set-operator/5fdcf278
  containers/control-plane-machine-set-operator/d053f757
  containers/control-plane-machine-set-operator/c2850dc7
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5 [all c7,c13]:
  volumes/kubernetes.io~configmap/marketplace-trusted-ca
  volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521
  volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem
  volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data
  volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem
  etc-hosts
  containers/marketplace-operator/fcfb0b2b
  containers/marketplace-operator/c7ac9b7d
  containers/marketplace-operator/fa0c0d52
  containers/marketplace-operator/c609b6ba
  containers/marketplace-operator/2be6c296
  containers/marketplace-operator/89a32653
  containers/marketplace-operator/4eb9afeb
  containers/marketplace-operator/13af6efa
/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a [all c12,c18]:
  etc-hosts
  containers/olm-operator/b03f9724
  containers/olm-operator/e3d105cc
  containers/olm-operator/3aed4d83
/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c [all c2,c18]:
  volumes/kubernetes.io~configmap/config
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml
  volumes/kubernetes.io~configmap/config/..data
  volumes/kubernetes.io~configmap/config/config.yaml
  etc-hosts
  containers/kube-storage-version-migrator-operator/0765fa6e
  containers/kube-storage-version-migrator-operator/2cefc627
  containers/kube-storage-version-migrator-operator/3dcc6345
  containers/kube-storage-version-migrator-operator/365af391
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d [all c9,c12]:
  volumes/kubernetes.io~empty-dir/available-featuregates
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml
  volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml
  etc-hosts
  containers/openshift-api/b1130c0f
  containers/openshift-api/236a5913
  containers/openshift-api/b9432e26
  containers/openshift-config-operator/5ddb0e3f
  containers/openshift-config-operator/986dc4fd
  containers/openshift-config-operator/8a23ff9a
  containers/openshift-config-operator/9728ae68
  containers/openshift-config-operator/665f31d0
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd [all c0,c16]:
  volumes/kubernetes.io~configmap/config
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml
  volumes/kubernetes.io~configmap/config/..data
  volumes/kubernetes.io~configmap/config/operator-config.yaml
  volumes/kubernetes.io~configmap/service-ca-bundle
  volumes/kubernetes.io~configmap/service-ca-bundle/..data
  volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656
  volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt
  volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt
  volumes/kubernetes.io~configmap/trusted-ca-bundle
  volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399
  volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt
  volumes/kubernetes.io~configmap/trusted-ca-bundle/..data
  volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt
  etc-hosts
  containers/authentication-operator/136c9b42
  containers/authentication-operator/98a1575b
  containers/authentication-operator/cac69136
  containers/authentication-operator/5deb77a7
  containers/authentication-operator/2ae53400
/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def [all c5,c16]:
  volumes/kubernetes.io~configmap/config
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml
  volumes/kubernetes.io~configmap/config/..data
  volumes/kubernetes.io~configmap/config/operator-config.yaml
  etc-hosts
  containers/service-ca-operator/e46f2326
  containers/service-ca-operator/dc688d3c
  containers/service-ca-operator/3497c3cd
  containers/service-ca-operator/177eb008
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec [all c2,c13]:
  volumes/kubernetes.io~configmap/config
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml
  volumes/kubernetes.io~configmap/config/..data
  volumes/kubernetes.io~configmap/config/config.yaml
  etc-hosts
  containers/openshift-apiserver-operator/af5a2afa
  containers/openshift-apiserver-operator/d780cb1f
  containers/openshift-apiserver-operator/49b0f374
  containers/openshift-apiserver-operator/26fbb125
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93 [all c5,c11]:
  volumes/kubernetes.io~configmap/trusted-ca
  volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536
  volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem
  volumes/kubernetes.io~configmap/trusted-ca/..data
  volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem
  etc-hosts
  containers/ingress-operator/cf14125a
  containers/ingress-operator/b7f86972
  containers/ingress-operator/e51d739c
  containers/ingress-operator/88ba6a69
  containers/ingress-operator/669a9acf
  containers/ingress-operator/5cd51231
  containers/ingress-operator/75349ec7
  containers/ingress-operator/15c26839
  containers/ingress-operator/45023dcd
  containers/ingress-operator/2bb66a50
  containers/kube-rbac-proxy/64d03bdd
  containers/kube-rbac-proxy/ab8e7ca0
  containers/kube-rbac-proxy/bb9be25f
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486 [all c10,c16]:
  volumes/kubernetes.io~configmap/trusted-ca
  volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258
  volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem
  volumes/kubernetes.io~configmap/trusted-ca/..data
  volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem
  etc-hosts
  containers/cluster-image-registry-operator/9a0b61d3
  containers/cluster-image-registry-operator/d471b9d2
  containers/cluster-image-registry-operator/8cb76b8e
/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9 [all c12,c18]:
  etc-hosts
  containers/catalog-operator/11a00840
  containers/catalog-operator/ec355a92
  containers/catalog-operator/992f735e
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b [all c9,c14]:
  volumes/kubernetes.io~configmap/config
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml
  volumes/kubernetes.io~configmap/config/..data
  volumes/kubernetes.io~configmap/config/config.yaml
  etc-hosts
  containers/openshift-controller-manager-operator/d59cdbbc
  containers/openshift-controller-manager-operator/72133ff0
  containers/openshift-controller-manager-operator/c56c834c
  containers/openshift-controller-manager-operator/d13724c7
  containers/openshift-controller-manager-operator/0a498258
/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde [all c4,c17]:
  etc-hosts
  containers/machine-config-server/fa471982
  containers/machine-config-server/fc900d92
  containers/machine-config-server/fa7d68da
/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d [all c9,c22]:
  etc-hosts
  containers/migrator/4bacf9b4
  containers/migrator/424021b1
  containers/migrator/fc2e31a3
  containers/graceful-termination/f51eefac
  containers/graceful-termination/c8997f2f
  containers/graceful-termination/7481f599
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f [all c19,c22]:
  volumes/kubernetes.io~configmap/signing-cabundle
  volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704
  volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt
  volumes/kubernetes.io~configmap/signing-cabundle/..data
  volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt
  etc-hosts
  containers/service-ca-controller/fdafea19
  containers/service-ca-controller/d0e1c571
  containers/service-ca-controller/ee398915
  containers/service-ca-controller/682bb6b8
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d:
  etc-hosts [c666,c920]
  containers/setup/a3e67855 [c294,c884]
  containers/setup/a989f289 [c336,c1016]
  containers/setup/915431bd [c666,c920]
  containers/etcd-ensure-env-vars/7796fdab [c294,c884]
  containers/etcd-ensure-env-vars/dcdb5f19 [c336,c1016]
  containers/etcd-ensure-env-vars/a3aaa88c [c666,c920]
  containers/etcd-resources-copy/5508e3e6 [c294,c884]
  containers/etcd-resources-copy/160585de [c336,c1016]
  containers/etcd-resources-copy/e99f8da3 [c666,c920]
  containers/etcdctl/8bc85570 [c294,c884]
  containers/etcdctl/a5861c91 [c336,c1016]
  containers/etcdctl/84db1135 [c666,c920]
  containers/etcd/9e1a6043 [c294,c884]
  containers/etcd/c1aba1c2 [c336,c1016]
  containers/etcd/d55ccd6d [c666,c920]
  containers/etcd-metrics/971cc9f6 [c294,c884]
  containers/etcd-metrics/8f2e3dcf [c336,c1016]
  containers/etcd-metrics/ceb35e9c [c666,c920]
  containers/etcd-readyz/1c192745 [c294,c884]
  containers/etcd-readyz/5209e501 [c336,c1016]
  containers/etcd-readyz/f83de4df [c666,c920]
  containers/etcd-rev/e7b978ac [c294,c884]
  containers/etcd-rev/c64304a1 [c336,c1016]
  containers/etcd-rev/5384386b [c666,c920]
/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc:
  etc-hosts [c268,c620]
  containers/multus-admission-controller/cce3e3ff [c435,c756]
  containers/multus-admission-controller/8fb75465 [c268,c620]
  containers/kube-rbac-proxy/740f573e [c435,c756]
  containers/kube-rbac-proxy/32fd1134 [c268,c620]
/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866 [all c19,c24]:
  etc-hosts
  containers/serve-healthcheck-canary/0a861bd3
  containers/serve-healthcheck-canary/80363026
  containers/serve-healthcheck-canary/bfa952a8
/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783:
  volumes/kubernetes.io~configmap/auth-proxy-config [c129,c158]
  volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 [c129,c158]
  volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml [c129,c158]
  volumes/kubernetes.io~configmap/auth-proxy-config/..data [c129,c158]
  volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml [c129,c158]
  volumes/kubernetes.io~configmap/config [c129,c158]
  volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 [c129,c158]
  volumes/kubernetes.io~configmap/config/..data [c129,c158]
  etc-hosts [c129,c158]
  containers/kube-rbac-proxy/793bf43d [c381,c387]
  containers/kube-rbac-proxy/7db1bb6e [c142,c438]
  containers/kube-rbac-proxy/4f6a0368 [c129,c158]
  containers/machine-approver-controller/c12c7d86 [c381,c387]
  containers/machine-approver-controller/36c4a773 [c142,c438]
  containers/machine-approver-controller/4c1e98ae [c142,c438]
  containers/machine-approver-controller/a4c8115c [c129,c158]
/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792 [all c97,c980]:
  etc-hosts
  containers/setup/7db1802e
  containers/kube-apiserver/a008a7ab
  containers/kube-apiserver-cert-syncer/2c836bac
  containers/kube-apiserver-cert-regeneration-controller/0ce62299
  containers/kube-apiserver-insecure-readyz/945d2457
  containers/kube-apiserver-check-endpoints/7d5c1dd8
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011 [all c7,c13]:
  volumes/kubernetes.io~empty-dir/utilities
  volumes/kubernetes.io~empty-dir/utilities/copy-content
  volumes/kubernetes.io~empty-dir/catalog-content
  volumes/kubernetes.io~empty-dir/catalog-content/catalog
  under volumes/kubernetes.io~empty-dir/catalog-content/catalog, both <name> and <name>/catalog.json (for bpfman-operator, <name>/index.json) for each of: 3scale-operator, advanced-cluster-management, amq-broker-rhel8, amq-online, amq-streams, amq-streams-console, amq7-interconnect-operator, ansible-automation-platform-operator, ansible-cloud-addons-operator, apicast-operator, apicurio-registry-3, authorino-operator, aws-load-balancer-operator, bamoe-businessautomation-operator, bamoe-kogito-operator, bpfman-operator, businessautomation-operator, cephcsi-operator, cincinnati-operator, cluster-kube-descheduler-operator, cluster-logging, cluster-observability-operator, compliance-operator, container-security-operator
Jan 21 15:24:17 crc restorecon[4761]:
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 21 15:24:17 crc restorecon[4761]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:17 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 
15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 
15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 21 15:24:18 crc restorecon[4761]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
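The deprecation warnings that kubenswrapper prints at startup (four above, two immediately below) mostly point at the same migration: move the value into the KubeletConfiguration file named by --config, which the FLAG dump later in this log shows as /etc/kubernetes/kubelet.conf. The two exceptions are visible in the messages themselves: --minimum-container-ttl-duration is replaced by the eviction flags, and --pod-infra-container-image is superseded by the CRI's own sandbox-image handling. A minimal sketch for collecting these warnings from a capture like this one, assuming only the message format shown above ("kubelet.log" is a placeholder filename, not part of this capture):

    import re

    # Matches startup warnings of the form shown above, e.g.
    #   "Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file ..."
    DEPRECATED = re.compile(r"Flag (--[A-Za-z0-9-]+) has been deprecated, (.+)")

    def deprecated_flags(lines):
        """Return {flag: migration hint} for each deprecation warning found."""
        found = {}
        for line in lines:
            if m := DEPRECATED.search(line):
                found[m.group(1)] = m.group(2).strip()
        return found

    if __name__ == "__main__":
        with open("kubelet.log") as f:  # placeholder path
            for flag, hint in sorted(deprecated_flags(f).items()):
                print(flag, "->", hint)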
Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 21 15:24:18 crc kubenswrapper[5021]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.571688 5021 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574103 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574117 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574124 5021 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574129 5021 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574133 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574136 5021 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574140 5021 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574144 5021 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574147 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574151 5021 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574155 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574158 5021 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574162 5021 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574165 5021 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574169 5021 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574172 5021 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574175 5021 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574180 5021 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574183 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574187 5021 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574190 5021 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574199 5021 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574203 5021 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574207 5021 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574211 5021 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574215 5021 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574219 5021 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574223 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574227 5021 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574231 5021 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574234 5021 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574238 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574242 5021 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574246 5021 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574249 5021 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574253 5021 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574256 5021 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574259 5021 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574263 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574267 5021 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574270 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574275 5021 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574279 5021 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574283 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574287 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574291 5021 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574295 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574299 5021 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574302 5021 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574306 5021 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574310 5021 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574313 5021 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574317 5021 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574320 5021 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574323 5021 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574328 5021 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574333 5021 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
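Each "unrecognized feature gate" warning at feature_gate.go:330 is the kubelet parsing the cluster-wide OpenShift gate list and skipping names that have no kubelet-side registration; only the Kubernetes-native gates survive into the effective set printed at feature_gate.go:386 further down. The run continues below, and the same list is replayed twice more in this log, so deduplication helps when scanning it. A small sketch, assuming only the message format shown here:

    import re
    from collections import Counter

    # Matches entries like:
    #   "W0121 15:24:18.574103 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack"
    UNRECOGNIZED = re.compile(r"unrecognized feature gate: (\w+)")

    def unrecognized_gates(lines):
        """Count how many times each unknown gate name is warned about."""
        counts = Counter()
        for line in lines:
            if m := UNRECOGNIZED.search(line):
                counts[m.group(1)] += 1
        return counts

    # Usage: unrecognized_gates(open("kubelet.log"))  # placeholder filename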
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574338 5021 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574343 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574347 5021 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574350 5021 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574354 5021 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574358 5021 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574361 5021 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574365 5021 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574369 5021 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574373 5021 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574376 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574380 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574383 5021 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.574386 5021 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574460 5021 flags.go:64] FLAG: --address="0.0.0.0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574468 5021 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574481 5021 flags.go:64] FLAG: --anonymous-auth="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574486 5021 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574492 5021 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574496 5021 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574502 5021 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574507 5021 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574511 5021 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574515 5021 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574520 5021 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574524 5021 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574528 5021 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574532 5021 flags.go:64] FLAG: --cgroup-root=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574536 5021 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574540 5021 flags.go:64] FLAG: --client-ca-file=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574544 5021 flags.go:64] FLAG: --cloud-config=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574550 5021 flags.go:64] FLAG: --cloud-provider=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574554 5021 flags.go:64] FLAG: --cluster-dns="[]"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574560 5021 flags.go:64] FLAG: --cluster-domain=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574564 5021 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574568 5021 flags.go:64] FLAG: --config-dir=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574573 5021 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574577 5021 flags.go:64] FLAG: --container-log-max-files="5"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574582 5021 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574586 5021 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574591 5021 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574595 5021 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574600 5021 flags.go:64] FLAG: --contention-profiling="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574605 5021 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574609 5021 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574613 5021 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574617 5021 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574623 5021 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574627 5021 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574632 5021 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574636 5021 flags.go:64] FLAG: --enable-load-reader="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574640 5021 flags.go:64] FLAG: --enable-server="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574644 5021 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574650 5021 flags.go:64] FLAG: --event-burst="100"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574654 5021 flags.go:64] FLAG: --event-qps="50"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574658 5021 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574662 5021 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574666 5021 flags.go:64] FLAG: --eviction-hard=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574672 5021 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574676 5021 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574680 5021 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574685 5021 flags.go:64] FLAG: --eviction-soft=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574688 5021 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574692 5021 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574696 5021 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574701 5021 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574705 5021 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574709 5021 flags.go:64] FLAG: --fail-swap-on="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574713 5021 flags.go:64] FLAG: --feature-gates=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574718 5021 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574722 5021 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574727 5021 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574732 5021 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574736 5021 flags.go:64] FLAG: --healthz-port="10248"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574740 5021 flags.go:64] FLAG: --help="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574744 5021 flags.go:64] FLAG: --hostname-override=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574748 5021 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574752 5021 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574756 5021 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574760 5021 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574764 5021 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574768 5021 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574772 5021 flags.go:64] FLAG: --image-service-endpoint=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574776 5021 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574779 5021 flags.go:64] FLAG: --kube-api-burst="100"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574784 5021 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574788 5021 flags.go:64] FLAG: --kube-api-qps="50"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574792 5021 flags.go:64] FLAG: --kube-reserved=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574796 5021 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574800 5021 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574804 5021 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574809 5021 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574813 5021 flags.go:64] FLAG: --lock-file=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574817 5021 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574822 5021 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574826 5021 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574832 5021 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574836 5021 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574840 5021 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574844 5021 flags.go:64] FLAG: --logging-format="text"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574848 5021 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574852 5021 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574856 5021 flags.go:64] FLAG: --manifest-url=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574860 5021 flags.go:64] FLAG: --manifest-url-header=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574866 5021 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574870 5021 flags.go:64] FLAG: --max-open-files="1000000"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574875 5021 flags.go:64] FLAG: --max-pods="110"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574879 5021 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574884 5021 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574888 5021 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574892 5021 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574896 5021 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574900 5021 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574918 5021 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574927 5021 flags.go:64] FLAG: --node-status-max-images="50"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574931 5021 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574936 5021 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574940 5021 flags.go:64] FLAG: --pod-cidr=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574944 5021 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574950 5021 flags.go:64] FLAG: --pod-manifest-path=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574954 5021 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574958 5021 flags.go:64] FLAG: --pods-per-core="0"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574962 5021 flags.go:64] FLAG: --port="10250"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574966 5021 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574970 5021 flags.go:64] FLAG: --provider-id=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574974 5021 flags.go:64] FLAG: --qos-reserved=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574978 5021 flags.go:64] FLAG: --read-only-port="10255"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574983 5021 flags.go:64] FLAG: --register-node="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574987 5021 flags.go:64] FLAG: --register-schedulable="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574992 5021 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.574999 5021 flags.go:64] FLAG: --registry-burst="10"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575004 5021 flags.go:64] FLAG: --registry-qps="5"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575008 5021 flags.go:64] FLAG: --reserved-cpus=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575012 5021 flags.go:64] FLAG: --reserved-memory=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575017 5021 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575021 5021 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575025 5021 flags.go:64] FLAG: --rotate-certificates="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575029 5021 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575033 5021 flags.go:64] FLAG: --runonce="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575037 5021 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575041 5021 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575045 5021 flags.go:64] FLAG: --seccomp-default="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575049 5021 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575053 5021 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575058 5021 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575062 5021 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575066 5021 flags.go:64] FLAG: --storage-driver-password="root"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575070 5021 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575075 5021 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575079 5021 flags.go:64] FLAG: --storage-driver-user="root"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575083 5021 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575087 5021 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575091 5021 flags.go:64] FLAG: --system-cgroups=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575095 5021 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575102 5021 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575107 5021 flags.go:64] FLAG: --tls-cert-file=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575110 5021 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575116 5021 flags.go:64] FLAG: --tls-min-version=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575120 5021 flags.go:64] FLAG: --tls-private-key-file=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575124 5021 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575129 5021 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575133 5021 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575137 5021 flags.go:64] FLAG: --v="2"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575142 5021 flags.go:64] FLAG: --version="false"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575148 5021 flags.go:64] FLAG: --vmodule=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575152 5021 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.575157 5021 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575281 5021 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575285 5021 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575291 5021 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575295 5021 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575300 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575305 5021 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575309 5021 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575314 5021 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575319 5021 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575323 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575327 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575331 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575336 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575342 5021 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575346 5021 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575351 5021 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575355 5021 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575359 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575363 5021 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575368 5021 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575371 5021 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575872 5021 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575881 5021 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575885 5021 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575890 5021 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575894 5021 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575898 5021 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575914 5021 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575918 5021 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575922 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575925 5021 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575930 5021 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575934 5021 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575938 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575941 5021 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575945 5021 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575949 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575952 5021 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575956 5021 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575960 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575964 5021 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575973 5021 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575978 5021 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575982 5021 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575986 5021 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575989 5021 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575993 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.575997 5021 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576001 5021 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
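The flags.go:64 block above dumps every kubelet flag as a FLAG: --name="value" entry, reflecting the command line and built-in defaults before the config file is applied. Values are printed as Go-quoted strings, so list- and map-valued flags (for example --cluster-dns="[]") stay raw text. A sketch that folds such a dump into a dict, assuming only the format visible above:

    import re

    # Matches dump entries like:
    #   'I0121 15:24:18.574900 5021 flags.go:64] FLAG: --node-ip="192.168.126.11"'
    FLAG = re.compile(r'flags\.go:64\] FLAG: (--[A-Za-z0-9-]+)="(.*)"')

    def flag_dump(lines):
        """Return {flag name: raw value string} from a kubelet FLAG dump."""
        return {m.group(1): m.group(2) for line in lines if (m := FLAG.search(line))}

    # e.g. flag_dump(open("kubelet.log"))["--config"] == "/etc/kubernetes/kubelet.conf"
    # ("kubelet.log" is a placeholder filename, not part of this capture)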
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576007 5021 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576012 5021 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576016 5021 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576019 5021 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576023 5021 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576027 5021 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576030 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576034 5021 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576037 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576041 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576044 5021 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576048 5021 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576051 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576055 5021 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576059 5021 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576064 5021 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576068 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576071 5021 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576076 5021 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576081 5021 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576084 5021 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.576088 5021 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.576094 5021 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.585458 5021 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.585480 5021 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585566 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585575 5021 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585581 5021 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585587 5021 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585591 5021 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585598 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585603 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585607 5021 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585612 5021 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585617 5021 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585621 5021 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585626 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585630 5021 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585637 5021 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
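The feature_gate.go:386 entry above is the effective gate set that remains after the unrecognized names were dropped, printed as a Go map literal. A sketch that turns that single entry into a {name: bool} dict, assuming the exact {map[Key:value ...]} layout shown:

    import re

    # Matches: 'feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true ... VolumeAttributesClass:false]}'
    GATES = re.compile(r"feature gates: \{map\[(.*?)\]\}")

    def effective_gates(log_text):
        """Parse the Go-style map dump of effective feature gates."""
        m = GATES.search(log_text)
        if not m:
            return {}
        return {name: value == "true"
                for name, value in (item.split(":", 1) for item in m.group(1).split())}

    # e.g. effective_gates(open("kubelet.log").read())["KMSv1"] is True
    # ("kubelet.log" is a placeholder filename, not part of this capture)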
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585642 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585646 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585651 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585658 5021 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585664 5021 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585669 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585673 5021 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585678 5021 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585683 5021 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585688 5021 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585692 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585697 5021 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585702 5021 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585706 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585712 5021 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585719 5021 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585724 5021 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585730 5021 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585734 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585740 5021 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585744 5021 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585749 5021 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585754 5021 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585758 5021 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585763 5021 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585767 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585772 5021 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585778 5021 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585782 5021 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585787 5021 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585792 5021 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585796 5021 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585802 5021 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585806 5021 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585811 5021 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585815 5021 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585820 5021 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585824 5021 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585828 5021 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585833 5021 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585838 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585842 5021 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585847 5021 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585852 5021 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585856 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585861 5021 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585866 5021 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585871 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585875 5021 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585882 5021 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585896 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585901 5021 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585921 5021 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585926 5021 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585931 5021 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585936 5021 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.585940 5021 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.585948 5021 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588726 5021 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588800 5021 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588813 5021 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588825 5021 feature_gate.go:330] unrecognized feature gate: Example
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588838 5021 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588850 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588863 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588881 5021 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588901 5021 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588965 5021 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588978 5021 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588987 5021 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.588996 5021 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589005 5021 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589014 5021 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589023 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589032 5021 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589042 5021 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589050 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589058 5021 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589066 5021 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589075 5021 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589084 5021 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589093 5021 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589101 5021 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589109 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589118 5021 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589159 5021 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589167 5021 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589178 5021 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589187 5021 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589195 5021 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589204 5021 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589212 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589221 5021 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589229 5021 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589239 5021 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589249 5021 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589259 5021 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589269 5021 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589281 5021 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589293 5021 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589303 5021 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589313 5021 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589322 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589331 5021 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589341 5021 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589350 5021 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589360 5021 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589370 5021 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589379 5021 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589387 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589398 5021 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589408 5021 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589419 5021 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589429 5021 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589437 5021 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589446 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589455 5021 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589463 5021 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589472 5021 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589480 5021 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589488 5021 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589496 5021 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589504 5021 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589512 5021 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589522 5021 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589531 5021 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589540 5021 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589549 5021 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.589557 5021 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.589574 5021 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.590322 5021 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.595683 5021 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.595873 5021 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.596728 5021 server.go:997] "Starting client certificate rotation"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.596780 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.597039 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-12 04:41:49.698216675 +0000 UTC
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.598165 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.602288 5021 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.603898 5021 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.604392 5021 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.612859 5021 log.go:25] "Validated CRI v1 runtime API"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.631322 5021 log.go:25] "Validated CRI v1 image API"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.633831 5021 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.636497 5021 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-21-15-18-55-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.636567 5021 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.660135 5021 manager.go:217] Machine: {Timestamp:2026-01-21 15:24:18.658685804 +0000 UTC m=+0.193799713 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:758eea6e-3652-403c-8df7-2cf690a0b7a2 BootID:90fd653a-5482-4360-a078-7b7d7b2b9201 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:04:b8:e2 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:04:b8:e2 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:f2:03:b7 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:8d:ab:eb Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:57:7d:80 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:e6:d1:29 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:64:f4:10 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:4a:ec:2a:e2:15:bc Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:8a:02:de:ac:7b:fd Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.660447 5021 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.660696 5021 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.661577 5021 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.661809 5021 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.661857 5021 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662153 5021 topology_manager.go:138] "Creating topology manager with none policy"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662166 5021 container_manager_linux.go:303] "Creating device plugin manager"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662356 5021 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662400 5021 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662666 5021 state_mem.go:36] "Initialized new in-memory state store"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.662884 5021 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.663655 5021 kubelet.go:418] "Attempting to sync node with API server"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.663682 5021 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.663713 5021 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.663727 5021 kubelet.go:324] "Adding apiserver pod source"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.663748 5021 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.665687 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.665699 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.665776 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.665796 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.666086 5021 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.666507 5021 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667161 5021 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667798 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667831 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667841 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667848 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667865 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667873 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667881 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667895 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667920 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667929 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667939 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.667948 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.668328 5021 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.668842 5021 server.go:1280] "Started kubelet"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.669088 5021 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.669394 5021 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 21 15:24:18 crc systemd[1]: Started Kubernetes Kubelet.
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.669836 5021 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.671686 5021 server.go:460] "Adding debug handlers to kubelet server"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.672185 5021 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.674344 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.674406 5021 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.673111 5021 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cc85dc266ff61 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 15:24:18.668805985 +0000 UTC m=+0.203919894,LastTimestamp:2026-01-21 15:24:18.668805985 +0000 UTC m=+0.203919894,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.675041 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 13:32:58.725539994 +0000 UTC
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.676227 5021 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.676415 5021 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.676434 5021 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.676420 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="200ms"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.676486 5021 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.680088 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.680685 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.681064 5021 factory.go:55] Registering systemd factory
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.681104 5021 factory.go:221] Registration of the systemd container factory successfully
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.681681 5021 factory.go:153] Registering CRI-O factory
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.681819 5021 factory.go:221] Registration of the crio container factory successfully
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.682060 5021 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.682274 5021 factory.go:103] Registering Raw factory
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.682387 5021 manager.go:1196] Started watching for new ooms in manager
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.683971 5021 manager.go:319] Starting recovery of all containers
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696659 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696751 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696775 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696796 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696819 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696838 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696858 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696875 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696904 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696952 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696975 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.696995 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697015 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697038 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697056 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697106 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697151 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697170 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697187 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697260 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697283 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697305 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697328 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697350 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697376 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697396 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697421 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697443 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697464 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697483 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697591 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697608 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697643 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697662 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697683 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697702 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697719 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697747 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697768 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697797 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697818 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697836 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697854 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697872 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697892 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697958 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.697979 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698006 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698029 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698053 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698079 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698103 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698134 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698158 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698181 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698202 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698228 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698252 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698266 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698289 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698309 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698332 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698350 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698377 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698394 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698410 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698430 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698446 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698469 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698483 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698498 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698515 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698532 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698553 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb"
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698567 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698598 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698621 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698636 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698710 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698729 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698802 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698818 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698840 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698856 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.698872 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701495 5021 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701630 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701680 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701704 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701770 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701810 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701866 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701906 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.701973 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702025 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 
15:24:18.702083 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702111 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702171 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702226 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702268 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702303 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702338 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702386 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702405 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702429 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702468 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702537 5021 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702602 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702644 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702671 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702713 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702836 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.702882 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703023 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703065 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703086 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703116 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703135 5021 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703153 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703225 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703245 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703277 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703300 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703332 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703382 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703605 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703649 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703669 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703738 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703760 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703793 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703838 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703927 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.703973 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.704029 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.704752 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.704895 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.704955 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705002 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705030 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705117 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705145 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705302 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705343 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705379 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705425 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705564 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705621 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705666 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705750 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705780 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705801 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.705362 5021 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cc85dc266ff61 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 15:24:18.668805985 +0000 UTC m=+0.203919894,LastTimestamp:2026-01-21 15:24:18.668805985 +0000 UTC m=+0.203919894,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705838 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705893 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.705954 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706056 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706086 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706128 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706148 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 21 
15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706170 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706315 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706352 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706394 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706454 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706480 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706527 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706565 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706622 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706664 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706695 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706788 
5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706817 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706858 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706880 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.706990 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707016 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707054 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707083 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707195 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707230 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707251 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707280 5021 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707304 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707321 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707353 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707445 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707468 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707497 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707513 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707557 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707610 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707657 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707683 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707707 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707767 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707787 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707838 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707867 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707888 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.707982 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708000 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708015 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708059 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708078 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708141 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708161 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708177 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708198 5021 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708215 5021 reconstruct.go:97] "Volume reconstruction finished" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.708233 5021 reconciler.go:26] "Reconciler: start to sync state" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.714784 5021 manager.go:324] Recovery completed Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.728360 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.731302 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.731360 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.731377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.732768 5021 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.732806 5021 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.732838 5021 state_mem.go:36] "Initialized new in-memory state store" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.734467 5021 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.736472 5021 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.736538 5021 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.736568 5021 kubelet.go:2335] "Starting kubelet main sync loop" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.736759 5021 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 21 15:24:18 crc kubenswrapper[5021]: W0121 15:24:18.740103 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.740277 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.741895 5021 policy_none.go:49] "None policy: Start" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.742801 5021 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.742922 5021 state_mem.go:35] "Initializing new in-memory state store" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.776808 5021 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.785861 5021 manager.go:334] "Starting Device Plugin manager" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.785931 5021 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.785945 5021 server.go:79] "Starting device plugin registration server" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.786399 5021 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.786417 5021 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.786775 5021 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.786960 5021 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.786971 5021 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.794410 5021 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.837731 5021 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 21 15:24:18 crc kubenswrapper[5021]: 
I0121 15:24:18.837834 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839044 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839078 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839088 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839198 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839439 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839480 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839790 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839819 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839828 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.839967 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.840074 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.840098 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.840106 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.840386 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.840487 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841058 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841080 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841090 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841215 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841556 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841623 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841677 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841699 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841708 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.841976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842015 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842124 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842353 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842446 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842558 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.842603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843149 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843244 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843464 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843498 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843751 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843795 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.843818 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.844718 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.844775 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.844796 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.877755 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="400ms" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.886534 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.887358 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.887423 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.887444 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.887495 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:18 crc kubenswrapper[5021]: E0121 15:24:18.888321 5021 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.911736 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.911775 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:18 crc 
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.911800 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.911846 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.913197 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.913441 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.913601 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.913751 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.913942 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.914096 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.914241 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.914534 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.914679 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:18 crc kubenswrapper[5021]: I0121 15:24:18.914830 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016484 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016569 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016600 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016625 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016650 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016749 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:19 crc 
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016777 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016813 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016810 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016761 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016853 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.016947 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017101 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017041 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017133 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017152 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
"operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017189 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017219 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017223 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017236 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017260 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017279 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017294 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017287 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017318 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017304 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017285 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017387 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.017543 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.088556 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.090585 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.090650 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.090662 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.090702 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:19 crc kubenswrapper[5021]: E0121 15:24:19.091495 5021 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.161319 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.170782 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: W0121 15:24:19.183468 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-cdeff013861bebd245b5fe9a205981e7388ddc7c0db73d009d50095874ac0bad WatchSource:0}: Error finding container cdeff013861bebd245b5fe9a205981e7388ddc7c0db73d009d50095874ac0bad: Status 404 returned error can't find the container with id cdeff013861bebd245b5fe9a205981e7388ddc7c0db73d009d50095874ac0bad Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.188594 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.210319 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.218128 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:19 crc kubenswrapper[5021]: W0121 15:24:19.235981 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-a1af02844f0404b4f41f8404f96f68206a1c2d9a39cda8f3b8bdfc8264891e0d WatchSource:0}: Error finding container a1af02844f0404b4f41f8404f96f68206a1c2d9a39cda8f3b8bdfc8264891e0d: Status 404 returned error can't find the container with id a1af02844f0404b4f41f8404f96f68206a1c2d9a39cda8f3b8bdfc8264891e0d Jan 21 15:24:19 crc kubenswrapper[5021]: W0121 15:24:19.245152 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-b6b582a80d9320cd228f728614e02d9588c7bb055155737396b3d494a106d0b0 WatchSource:0}: Error finding container b6b582a80d9320cd228f728614e02d9588c7bb055155737396b3d494a106d0b0: Status 404 returned error can't find the container with id b6b582a80d9320cd228f728614e02d9588c7bb055155737396b3d494a106d0b0 Jan 21 15:24:19 crc kubenswrapper[5021]: E0121 15:24:19.279304 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="800ms" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.492452 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.493605 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.493643 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.493654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.493719 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:19 crc kubenswrapper[5021]: E0121 15:24:19.494220 5021 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 21 15:24:19 crc kubenswrapper[5021]: W0121 15:24:19.668185 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:19 crc kubenswrapper[5021]: E0121 15:24:19.668472 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.670437 5021 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.675202 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 04:26:29.457272957 +0000 UTC Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.743741 5021 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78" exitCode=0 Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.743796 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.743936 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b6b582a80d9320cd228f728614e02d9588c7bb055155737396b3d494a106d0b0"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.744078 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.747394 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.747445 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.747458 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.747779 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.747859 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a1af02844f0404b4f41f8404f96f68206a1c2d9a39cda8f3b8bdfc8264891e0d"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.748930 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9" exitCode=0 Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749001 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749024 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9ecec41bceb4d5560061773eb1ea31b9420d54b41f846f8963168ef5d09be591"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749104 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749851 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749883 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.749893 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.750843 5021 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f" exitCode=0 Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.750900 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.750941 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e01c893161ec2391afb2ce21b9992aaf58c20154fbc8947dcc717f2a3d2b1e44"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.751025 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.751195 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.751689 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.751715 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.751724 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752216 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752240 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752243 5021 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="c1281a6adaf53724c25cf50f5b8fda21600c3813c1ca31f707ac23493b45af27" exitCode=0 Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752251 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752263 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"c1281a6adaf53724c25cf50f5b8fda21600c3813c1ca31f707ac23493b45af27"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752281 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"cdeff013861bebd245b5fe9a205981e7388ddc7c0db73d009d50095874ac0bad"} Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752328 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.752961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.753012 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:19 crc kubenswrapper[5021]: I0121 15:24:19.753021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:19 crc kubenswrapper[5021]: W0121 15:24:19.928503 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:19 crc kubenswrapper[5021]: E0121 15:24:19.928580 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 21 15:24:20 crc kubenswrapper[5021]: W0121 15:24:20.051588 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:20 crc kubenswrapper[5021]: E0121 15:24:20.051683 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 21 15:24:20 crc kubenswrapper[5021]: E0121 15:24:20.080219 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="1.6s" Jan 21 15:24:20 crc kubenswrapper[5021]: W0121 15:24:20.242795 5021 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 21 15:24:20 crc kubenswrapper[5021]: E0121 15:24:20.242925 5021 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.294614 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.297587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.297716 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.297731 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.297878 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:20 crc kubenswrapper[5021]: E0121 15:24:20.299936 5021 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.675860 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 08:35:01.691050943 +0000 UTC Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.732388 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.756362 5021 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362" exitCode=0 Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.756437 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.756597 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.757477 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.757502 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.757513 5021 
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.757513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.758109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"7ab3b325feadaad4a609975f31dc653f307a9b319e8b3d24269a134aa0fa596c"}
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.758228 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.759181 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.759212 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.759223 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761214 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76"}
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761243 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d"}
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761256 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a"}
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761331 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761943 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761982 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.761996 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763103 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46"}
Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763128 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8"}
pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763194 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763804 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.763812 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.765530 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.765565 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.765577 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.765587 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535"} Jan 21 15:24:20 crc kubenswrapper[5021]: I0121 15:24:20.900049 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.676624 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 06:25:59.414512101 +0000 UTC Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.772785 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753"} Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.772960 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.773925 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.773956 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.773973 5021 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.775217 5021 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee" exitCode=0 Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.775270 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee"} Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.775461 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.777352 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.778531 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.778571 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.778581 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.778627 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.779003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.779060 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.900531 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.902268 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.902370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.902386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:21 crc kubenswrapper[5021]: I0121 15:24:21.902423 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.677521 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 05:03:13.60371926 +0000 UTC Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782088 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"effb786620c9d65d8acf9881ed0f98a94de9f39ca7eb6870377958061f4dc9af"} Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782121 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782136 5021 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f6e1f771251595f0de0a5ec1739ebe02a63de7d6a3cefdf90a42b48c115dab87"} Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782148 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4972b65704958e549d665d88ee3470710096c4784f9872045274d8af63e2043d"} Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782156 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"21c65f33d2b7b951ba4b6f87e7246aa6911ac9b7db7be966578213fc3041441f"} Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782164 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782213 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.782984 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.783013 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.783023 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.783092 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.783107 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.783117 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.789701 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:22 crc kubenswrapper[5021]: I0121 15:24:22.880609 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.016020 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.678460 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 00:38:05.533322912 +0000 UTC Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.788324 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"50ec1a0333d7c102879ec60e1262b8035b347cad7f9107959e49b4718b3a79db"} Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.788393 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.788429 5021 prober_manager.go:312] "Failed to trigger a 
manual run" probe="Readiness" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.788473 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.788393 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789352 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789384 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789404 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789453 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789465 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789934 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:23 crc kubenswrapper[5021]: I0121 15:24:23.789946 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.050011 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.183619 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.183773 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.184808 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.184838 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.184850 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.678592 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 06:07:48.286721488 +0000 UTC Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.790607 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.790668 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 
15:24:24.790835 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.792637 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.792689 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.792704 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.793101 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.793157 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:24 crc kubenswrapper[5021]: I0121 15:24:24.793171 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:25 crc kubenswrapper[5021]: I0121 15:24:25.679738 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 04:11:49.993523534 +0000 UTC Jan 21 15:24:25 crc kubenswrapper[5021]: I0121 15:24:25.792140 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:25 crc kubenswrapper[5021]: I0121 15:24:25.793004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:25 crc kubenswrapper[5021]: I0121 15:24:25.793045 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:25 crc kubenswrapper[5021]: I0121 15:24:25.793057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.243710 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.243861 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.244956 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.245001 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.245014 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:26 crc kubenswrapper[5021]: I0121 15:24:26.680258 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 10:34:20.116269426 +0000 UTC Jan 21 15:24:27 crc kubenswrapper[5021]: I0121 15:24:27.681194 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 02:31:48.005709314 +0000 UTC Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.314244 5021 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.314401 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.315434 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.315463 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.315475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:28 crc kubenswrapper[5021]: I0121 15:24:28.681743 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 18:34:19.32193647 +0000 UTC Jan 21 15:24:28 crc kubenswrapper[5021]: E0121 15:24:28.794609 5021 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 21 15:24:29 crc kubenswrapper[5021]: I0121 15:24:29.244601 5021 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 15:24:29 crc kubenswrapper[5021]: I0121 15:24:29.244715 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:24:29 crc kubenswrapper[5021]: I0121 15:24:29.682873 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 21:42:35.604988479 +0000 UTC Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.286401 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.286669 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.288384 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.288430 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.288447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.290716 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.671298 5021 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.683685 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 23:26:23.89024101 +0000 UTC Jan 21 15:24:30 crc kubenswrapper[5021]: E0121 15:24:30.734300 5021 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.805257 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.806352 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.806428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.806441 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:30 crc kubenswrapper[5021]: I0121 15:24:30.809743 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.253796 5021 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.253864 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.268783 5021 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.268861 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.684135 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 09:30:52.141244528 +0000 UTC Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.808749 5021 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.810133 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.810170 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:31 crc kubenswrapper[5021]: I0121 15:24:31.810180 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.649992 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.650301 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.652165 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.652206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.652218 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.683405 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.685165 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 10:22:54.18655246 +0000 UTC Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.795830 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.796116 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.798198 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.798243 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.798254 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.800112 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.810349 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.810565 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811427 5021 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811443 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811514 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811534 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.811546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:32 crc kubenswrapper[5021]: I0121 15:24:32.823237 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 21 15:24:33 crc kubenswrapper[5021]: I0121 15:24:33.685950 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 06:29:21.988826515 +0000 UTC Jan 21 15:24:33 crc kubenswrapper[5021]: I0121 15:24:33.812870 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:33 crc kubenswrapper[5021]: I0121 15:24:33.813825 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:33 crc kubenswrapper[5021]: I0121 15:24:33.813857 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:33 crc kubenswrapper[5021]: I0121 15:24:33.813868 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:34 crc kubenswrapper[5021]: I0121 15:24:34.686158 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 04:25:17.449397971 +0000 UTC Jan 21 15:24:35 crc kubenswrapper[5021]: I0121 15:24:35.120773 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 21 15:24:35 crc kubenswrapper[5021]: I0121 15:24:35.138690 5021 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 21 15:24:35 crc kubenswrapper[5021]: I0121 15:24:35.687275 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 17:22:48.3840119 +0000 UTC Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.238343 5021 trace.go:236] Trace[1224995753]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 15:24:22.907) (total time: 13330ms): Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1224995753]: ---"Objects listed" error: 13330ms (15:24:36.238) Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1224995753]: [13.330511251s] [13.330511251s] END Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.238371 5021 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.253965 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline 
exceeded" interval="3.2s" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.255793 5021 trace.go:236] Trace[1917449907]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 15:24:22.408) (total time: 13847ms): Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1917449907]: ---"Objects listed" error: 13847ms (15:24:36.255) Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1917449907]: [13.847576424s] [13.847576424s] END Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.255831 5021 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.255809 5021 trace.go:236] Trace[864593391]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 15:24:21.672) (total time: 14583ms): Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[864593391]: ---"Objects listed" error: 14582ms (15:24:36.255) Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[864593391]: [14.583017183s] [14.583017183s] END Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.256019 5021 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.256095 5021 trace.go:236] Trace[1435704556]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Jan-2026 15:24:21.874) (total time: 14381ms): Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1435704556]: ---"Objects listed" error: 14381ms (15:24:36.256) Jan 21 15:24:36 crc kubenswrapper[5021]: Trace[1435704556]: [14.381742566s] [14.381742566s] END Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.256118 5021 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.258513 5021 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.259533 5021 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.674739 5021 apiserver.go:52] "Watching apiserver" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.676764 5021 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.677239 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.677845 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.677941 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.678050 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.678124 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.678194 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.678512 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.678746 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.678784 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.678794 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.679306 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.679897 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.679949 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.680265 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.680312 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.680548 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.680560 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.680562 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.683701 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.683713 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.684783 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.687431 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 16:37:32.695937221 +0000 UTC Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.690370 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.706870 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.714566 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.724128 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.732256 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.740628 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.746418 5021 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52054->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.746435 5021 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52056->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.746486 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52054->192.168.126.11:17697: read: connection reset by peer" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.746602 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:52056->192.168.126.11:17697: read: connection reset by peer" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.747203 5021 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.747241 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.752986 5021 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5
df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.763230 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.774122 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.777872 5021 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.823059 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.824893 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753" exitCode=255 Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.825014 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753"} Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.835782 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.836586 5021 scope.go:117] "RemoveContainer" containerID="6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.840835 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.852159 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.862199 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.862265 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.862293 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.862434 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.863203 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.863209 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.863526 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.863685 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864021 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864191 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864332 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864224 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864457 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864492 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864611 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864651 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864712 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864745 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.864840 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865015 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865058 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865100 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865138 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865169 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865210 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865248 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865290 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865408 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865496 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865567 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865599 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" 
(UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865630 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865658 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865711 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865742 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865776 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865813 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865698 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.865759 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.866570 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.866902 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867008 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867050 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867092 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867130 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867202 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867282 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867322 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod 
\"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867355 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867391 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867255 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867422 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867816 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.867949 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868039 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868400 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868458 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868414 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.868516 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:24:37.368442695 +0000 UTC m=+18.903556624 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868511 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868545 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868476 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868580 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868689 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868754 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.868863 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869235 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869286 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869636 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869974 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.870021 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.870170 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869813 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869233 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.870495 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.870783 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.869837 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.870964 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871163 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871282 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871338 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871345 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871389 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.871716 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.872048 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.872517 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.872733 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.873442 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.873508 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.873541 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874468 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874496 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874375 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874536 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874543 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874574 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874601 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874626 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874654 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874684 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874720 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874746 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874824 
5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.874769 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875150 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875223 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875232 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875309 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875283 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877033 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.876358 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877033 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877297 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.875305 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877335 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877574 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877301 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877862 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877921 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877954 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877985 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877598 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877697 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877698 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.877927 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878233 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878274 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878557 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878732 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878446 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878210 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879136 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879323 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879538 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879569 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879151 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.878867 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879311 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879457 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879757 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879915 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.879945 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880026 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880047 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880072 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880071 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880122 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880257 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880269 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880294 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880319 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880333 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880343 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880453 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880484 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880511 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880536 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880563 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880589 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880594 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880621 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880647 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880669 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880720 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880777 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880801 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880765 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880829 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880856 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880881 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880966 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.880993 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881018 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881042 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881066 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881089 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881165 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881192 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881224 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881278 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881333 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881379 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881414 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881444 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881473 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881507 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881568 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881607 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881641 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881677 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881712 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881745 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881777 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881813 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881837 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881851 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881888 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881959 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.881996 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882030 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882062 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882095 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882130 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882182 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882218 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882254 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882287 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882321 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882355 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882389 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882423 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882458 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882597 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882635 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882969 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883018 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883053 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883091 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883151 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883222 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883256 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883295 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883330 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883365 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883405 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883443 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883512 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883554 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883589 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883625 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883823 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883892 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883956 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884068 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884167 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884214 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884251 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884291 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884330 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884391 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884431 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884471 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884521 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884558 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884596 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884645 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884735 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884774 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884809 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884845 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884898 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884987 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885100 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885148 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885183 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885219 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885255 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885290 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885479 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885519 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885556 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885593 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885779 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885826 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885863 5021 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885902 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885971 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886045 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886199 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886412 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886511 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886574 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886612 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886686 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886731 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886772 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886812 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887123 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887180 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887219 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887283 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887325 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: 
\"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887463 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887498 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887525 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887548 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887567 5021 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887587 5021 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887609 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887637 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887659 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887682 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887703 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887725 5021 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" 
DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887744 5021 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887763 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887782 5021 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887802 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887822 5021 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887842 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887862 5021 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887881 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887899 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887947 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887967 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887986 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882183 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" 
(OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882421 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888088 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882485 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.882566 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883046 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883065 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883176 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
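
The "Volume detached" entries above come from the kubelet's volume reconciler: once every mount of a pod volume has been torn down, the volume is dropped from the actual state of the world, and DevicePath is empty because these are file-level plugins (secret, configmap, projected, empty-dir) rather than attachable block devices. Each UniqueName encodes the plugin, the owning pod UID, and the volume name, and since pod UIDs are fixed-width the fields can be recovered mechanically. A minimal Go sketch (the example UniqueName is copied verbatim from the log; the fixed 36-byte UID width is an assumption that holds for standard Kubernetes UIDs):

package main

import (
	"fmt"
	"strings"
)

// A UniqueName such as the ones in the "Volume detached" entries has the
// shape "<plugin>/<podUID>-<volumeName>". Pod UIDs are 36 bytes, so the
// split is unambiguous even though volume names themselves contain dashes.
func parseUniqueName(unique string) (plugin, podUID, volume string, ok bool) {
	i := strings.LastIndex(unique, "/")
	if i < 0 || len(unique) < i+1+37 {
		return "", "", "", false
	}
	rest := unique[i+1:]
	return unique[:i], rest[:36], rest[37:], true
}

func main() {
	// Example copied verbatim from the log above.
	plugin, uid, vol, ok := parseUniqueName(
		"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls")
	fmt.Println(plugin, uid, vol, ok)
	// kubernetes.io/secret 6731426b-95fe-49ff-bb5f-40441049fde2 control-plane-machine-set-operator-tls true
}
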
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883422 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883514 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883598 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883715 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883800 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883894 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883887 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.883997 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.884058 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885670 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.885448 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886385 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886836 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.886953 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887017 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887399 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887646 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887674 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887686 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.887973 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
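
Each UnmountVolume.TearDown entry pairs an OuterVolumeSpecName (the volume name as written in the pod spec) with an InnerVolumeSpecName (the name the resolved plugin operates on) and the plugin that performed the teardown; VolumeGidValue is empty because none of these volumes carry a GID annotation. Since the fields are quoted consistently, the teardowns are easy to tally per plugin. A rough Go sketch, assuming the log has been uncompressed to a hypothetical kubelet.log:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Pair each TearDown with the nearest following PluginName field
	// (non-greedy), so several entries fused onto one physical line
	// still each count once.
	re := regexp.MustCompile(`UnmountVolume\.TearDown succeeded for volume .*?PluginName "([^"]+)"`)
	counts := map[string]int{}

	f, err := os.Open("kubelet.log") // hypothetical path to the uncompressed log
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // status patches make for very long lines
	for sc.Scan() {
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[1]]++
		}
	}
	for plugin, n := range counts {
		fmt.Printf("%-28s %d\n", plugin, n)
	}
}
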
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888008 5021 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888487 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888597 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888616 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888634 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888660 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888678 5021 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888696 5021 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888714 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888734 5021 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888752 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888771 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888790 5021 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" 
DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888814 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888832 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888849 5021 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888870 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888890 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888937 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888956 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888996 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889015 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889033 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889055 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889074 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889102 5021 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc 
kubenswrapper[5021]: I0121 15:24:36.889119 5021 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889136 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889154 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889289 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889304 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889318 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889331 5021 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889344 5021 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889360 5021 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889374 5021 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889386 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889404 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889423 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc 
kubenswrapper[5021]: I0121 15:24:36.889441 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889458 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889478 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889496 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889514 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889530 5021 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889547 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889566 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888056 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888153 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.888747 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889448 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889674 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.889935 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.890132 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.890290 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.890570 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.890775 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891028 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891217 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891462 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891413 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891678 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891710 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.891860 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892261 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892589 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892643 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892707 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892782 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.892816 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893164 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893173 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893434 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893606 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893694 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.893783 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
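
The status_manager failure above is not a kubelet bug: the pod status patch (a strategic merge patch, hence the $setElementOrder/conditions key) is rejected by the API server because the pod.network-node-identity.openshift.io admission webhook at 127.0.0.1:9743 is not listening yet. Whether the endpoint is up can be checked with a plain TCP dial; a small Go probe (the address is taken from the log, the 2s timeout is arbitrary):

package main

import (
	"fmt"
	"net"
	"time"
)

// TCP probe of the admission webhook endpoint the status patches are failing
// against (https://127.0.0.1:9743/pod). The dial alone reproduces the
// "connect: connection refused" from the log; no TLS handshake required.
func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		fmt.Println("webhook endpoint unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting connections")
}
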
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.894405 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.894463 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.894481 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:37.394461496 +0000 UTC m=+18.929575405 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.894874 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.894935 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.894967 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.895306 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.895339 5021 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.895955 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.896110 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.896179 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.896206 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.896276 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.896326 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:37.396309147 +0000 UTC m=+18.931423036 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.896477 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.897139 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.897579 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.897185 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.897896 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.897893 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.898271 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.898461 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.898533 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.899622 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.902468 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.903025 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.907606 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.910269 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.910405 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.910649 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.910755 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.910824 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.910958 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:37.410938296 +0000 UTC m=+18.946052185 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.911390 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.913388 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.913512 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.913600 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.913615 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:36 crc kubenswrapper[5021]: E0121 15:24:36.913688 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:37.413656761 +0000 UTC m=+18.948770650 (durationBeforeRetry 500ms). 
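
Each failed MountVolume.SetUp is requeued by nestedpendingoperations with a delay, visible as "durationBeforeRetry 500ms" and the matching "No retries permitted until" timestamp roughly 500ms in the future. On repeated failures the delay doubles up to a cap; the 500ms base is read off the entries above, while the cap used below (2m2s, the value commonly seen in kubelet logs) is an assumption, not something this excerpt shows. A sketch of the resulting schedule:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond           // base read off the entries above
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap, not shown in this excerpt
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %2d: retry after %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
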
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.915709 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.915821 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.916795 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.918312 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.918547 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.918805 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.918855 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.919017 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.919078 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.919144 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.919774 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.921387 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.922077 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.923284 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.923341 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.924581 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.924848 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.926852 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.927143 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.927154 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.928200 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.928183 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.928365 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.928430 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.928712 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929038 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929216 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929300 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929326 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929444 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929448 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929491 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929558 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.929800 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.930085 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.930099 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.931165 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.931250 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.931352 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.931462 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.931760 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.933566 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.933582 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.933588 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.936663 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.936747 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.937473 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.937778 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.943480 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.946104 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.956010 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.957887 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.990943 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.990988 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991030 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991041 5021 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991052 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991062 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991072 5021 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991151 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991189 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991192 5021 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991209 5021 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991223 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991239 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991252 5021 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991264 5021 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991277 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991289 5021 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991300 5021 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991312 5021 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991325 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991310 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991337 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991457 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991476 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 
15:24:36.991492 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991510 5021 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991524 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991538 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991552 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991565 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991578 5021 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991591 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991604 5021 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991619 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991633 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991646 5021 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991684 5021 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991730 5021 
reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991745 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991758 5021 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991771 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991784 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991798 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991813 5021 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991826 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991839 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991854 5021 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991868 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991881 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991894 5021 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 
crc kubenswrapper[5021]: I0121 15:24:36.991929 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991942 5021 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991953 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991968 5021 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991982 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.991994 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992007 5021 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992022 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992038 5021 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992050 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992062 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992074 5021 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992091 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 
15:24:36.992104 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992118 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992131 5021 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992144 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992156 5021 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992169 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992182 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992210 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992222 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992238 5021 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992253 5021 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992265 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992278 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:24:36 crc kubenswrapper[5021]: 
I0121 15:24:36.992290 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992302 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992315 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992327 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992338 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992351 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992364 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992376 5021 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992388 5021 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992400 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992412 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992423 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992436 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992447 5021 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992461 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992473 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992484 5021 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992497 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992509 5021 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992521 5021 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992535 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992546 5021 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992558 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992570 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992582 5021 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992596 5021 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992611 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992622 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992636 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992648 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992660 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992713 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992727 5021 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992739 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992751 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992762 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992774 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992786 5021 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992800 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992811 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992822 5021 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992835 5021 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992847 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992860 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992871 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992884 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992896 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992927 5021 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992941 5021 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992954 5021 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.992966 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Jan 21 15:24:36 crc kubenswrapper[5021]: I0121 15:24:36.994891 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.003757 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 21 15:24:37 crc kubenswrapper[5021]: W0121 15:24:37.005724 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-e1e58b6d17613401cb62c73d052e88b7ab2d778242b303f708c96250598de7ec WatchSource:0}: Error finding container e1e58b6d17613401cb62c73d052e88b7ab2d778242b303f708c96250598de7ec: Status 404 returned error can't find the container with id e1e58b6d17613401cb62c73d052e88b7ab2d778242b303f708c96250598de7ec
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.008839 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 21 15:24:37 crc kubenswrapper[5021]: W0121 15:24:37.015114 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-244b182ce3c7b46ded3975bef3121b7850ad56db7f20945b35d56d804c6a19da WatchSource:0}: Error finding container 244b182ce3c7b46ded3975bef3121b7850ad56db7f20945b35d56d804c6a19da: Status 404 returned error can't find the container with id 244b182ce3c7b46ded3975bef3121b7850ad56db7f20945b35d56d804c6a19da
Jan 21 15:24:37 crc kubenswrapper[5021]: W0121 15:24:37.021070 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-faa4c400755b7fe4e761d574ed2eb65642a5a5430658e300125711c683a65caf WatchSource:0}: Error finding container faa4c400755b7fe4e761d574ed2eb65642a5a5430658e300125711c683a65caf: Status 404 returned error can't find the container with id faa4c400755b7fe4e761d574ed2eb65642a5a5430658e300125711c683a65caf
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.395696 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.395883 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:24:38.395832073 +0000 UTC m=+19.930945972 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.396177 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.396288 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.396341 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:38.396328747 +0000 UTC m=+19.931442636 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.497345 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.497386 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.497404 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497591 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497699 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497718 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497729 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497703 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:38.497677017 +0000 UTC m=+20.032790916 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497804 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:38.49779034 +0000 UTC m=+20.032904229 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497853 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497939 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.497954 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:24:37 crc kubenswrapper[5021]: E0121 15:24:37.498050 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:38.498029687 +0000 UTC m=+20.033143666 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.687710 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 21:05:08.628490038 +0000 UTC
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.828453 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"faa4c400755b7fe4e761d574ed2eb65642a5a5430658e300125711c683a65caf"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.830006 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.830159 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.830271 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"244b182ce3c7b46ded3975bef3121b7850ad56db7f20945b35d56d804c6a19da"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.831112 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.831141 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e1e58b6d17613401cb62c73d052e88b7ab2d778242b303f708c96250598de7ec"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.833110 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.834846 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1"}
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.835060 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.845657 5021 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.863076 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.875799 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.888011 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.899729 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.918282 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.931764 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.945856 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.962601 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":
true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.977288 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:37 crc kubenswrapper[5021]: I0121 15:24:37.989000 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.000994 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:37Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.014965 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.033369 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.051988 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.065428 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.405775 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.405865 5021 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.405958 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:24:40.405932918 +0000 UTC m=+21.941046837 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.405984 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.406024 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:40.406015331 +0000 UTC m=+21.941129220 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.506649 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.506826 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.506958 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.506973 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507026 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:40.507010462 +0000 UTC m=+22.042124351 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507111 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507192 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:40.507173926 +0000 UTC m=+22.042287815 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.506931 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.507355 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507430 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507449 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507458 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.507598 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:40.507580937 +0000 UTC m=+22.042694826 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.689658 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 20:38:40.394516931 +0000 UTC Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.737603 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.737677 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.737602 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.737762 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.737850 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:38 crc kubenswrapper[5021]: E0121 15:24:38.737939 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.744134 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.744701 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.746138 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.746893 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.748082 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.748571 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.749288 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.750382 
5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.751098 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.752119 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.752672 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.753934 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.754471 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.754668 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.754979 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.755834 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.756356 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.757302 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.757722 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.758299 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.759285 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.759696 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.760606 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.761016 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" 
path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.761940 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.762327 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.762872 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.763855 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.764364 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.765149 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.765290 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.765711 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.766563 5021 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.766662 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.768483 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.769306 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.769765 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.771201 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.771804 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.772651 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.773231 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.774192 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.774638 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.775317 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod
-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.775542 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.776111 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.777188 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.777626 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.778489 5021 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.778963 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.780174 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.780705 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.781540 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.782005 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.782870 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.783433 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.783862 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.785598 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.798529 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.811503 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.826129 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:38 crc kubenswrapper[5021]: I0121 15:24:38.840453 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.459616 5021 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.461756 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.461813 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.461835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.461987 5021 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.469319 5021 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.469657 5021 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.471444 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.471492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.471503 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.471520 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.471535 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.499724 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154a
fa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.502851 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.502896 5021 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.502922 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.502941 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.502950 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.524825 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.528955 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.529006 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.529019 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.529043 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.529058 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.542412 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.546518 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.546569 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.546582 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.546603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.546617 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.559252 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.563784 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.563850 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.563873 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.563898 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.563937 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.580294 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: E0121 15:24:39.580486 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.582185 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.582214 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.582222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.582237 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.582247 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.684534 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.684604 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.684618 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.684636 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.684649 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.690840 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 14:31:23.162574537 +0000 UTC Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.786541 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.786578 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.786589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.786623 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.786634 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.841052 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df"} Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.859276 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.872467 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.883519 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.888324 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.888360 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.888368 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.888401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.888412 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.897550 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.909567 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.924109 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.934191 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.947792 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.990422 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.990470 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.990485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.990505 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:39 crc kubenswrapper[5021]: I0121 15:24:39.990518 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:39Z","lastTransitionTime":"2026-01-21T15:24:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.093232 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.093289 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.093299 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.093313 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.093341 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.195014 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.195052 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.195062 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.195076 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.195085 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.297768 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.297847 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.297862 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.297879 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.297896 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.400777 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.400863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.400890 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.400963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.400988 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.424053 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.424166 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.424245 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:24:44.424217096 +0000 UTC m=+25.959331005 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.424306 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.424391 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:44.424370481 +0000 UTC m=+25.959484410 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.504072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.504139 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.504153 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.504172 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.504191 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.524953 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525038 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.525058 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525121 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:44.525097454 +0000 UTC m=+26.060211363 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.525150 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525214 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525248 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525255 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525295 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525268 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525353 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525374 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:44.525345671 +0000 UTC m=+26.060459610 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.525444 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-21 15:24:44.525417443 +0000 UTC m=+26.060531372 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.607387 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.607467 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.607481 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.607503 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.607518 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.691279 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 02:30:13.721489296 +0000 UTC Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.710629 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.710672 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.710684 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.710702 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.710715 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.737101 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.737141 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.737243 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.737387 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.737489 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:40 crc kubenswrapper[5021]: E0121 15:24:40.737884 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.812521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.812819 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.812941 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.813039 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.813112 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.915720 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.916110 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.916261 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.916385 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:40 crc kubenswrapper[5021]: I0121 15:24:40.916588 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:40Z","lastTransitionTime":"2026-01-21T15:24:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.019377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.019651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.019713 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.019780 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.019843 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.122516 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.122736 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.122797 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.122893 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.123026 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.225595 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.225865 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.226051 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.226162 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.226240 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.329158 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.329547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.329738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.329970 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.330150 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.432948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.433014 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.433029 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.433049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.433062 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.535139 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.535193 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.535207 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.535221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.535231 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.637981 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.638041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.638054 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.638073 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.638086 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.691989 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 16:29:59.822256788 +0000 UTC Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.740845 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.740890 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.740901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.740941 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.740955 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.843172 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.843220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.843231 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.843248 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.843257 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.945957 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.946005 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.946015 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.946030 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:41 crc kubenswrapper[5021]: I0121 15:24:41.946042 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:41Z","lastTransitionTime":"2026-01-21T15:24:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.048658 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.048699 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.048711 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.048726 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.048737 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.152021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.152072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.152082 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.152100 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.152113 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.256443 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.256488 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.256496 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.256512 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.256521 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.328404 5021 csr.go:261] certificate signing request csr-pvfvv is approved, waiting to be issued Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.359204 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.359262 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.359273 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.359295 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.359310 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.378880 5021 csr.go:257] certificate signing request csr-pvfvv is issued Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.461934 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.461969 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.461983 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.462000 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.462009 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.564025 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.564073 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.564082 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.564097 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.564107 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.666300 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.666357 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.666369 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.666387 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.666399 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.692615 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 07:06:20.019694674 +0000 UTC Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.737316 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.737371 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:42 crc kubenswrapper[5021]: E0121 15:24:42.737467 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.737487 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:42 crc kubenswrapper[5021]: E0121 15:24:42.737657 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:42 crc kubenswrapper[5021]: E0121 15:24:42.737806 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.769122 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.769160 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.769169 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.769182 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.769191 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.822956 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-vg9bt"] Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.823279 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.826230 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.826502 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.826669 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.844788 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.860778 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.871869 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.871929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.871939 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.871955 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.871971 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.876258 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserv
er-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.892138 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.908131 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.921728 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.936743 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.947343 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t65tk\" (UniqueName: \"kubernetes.io/projected/78b7c66b-9935-480c-bf2e-9109b6141006-kube-api-access-t65tk\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.947415 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/78b7c66b-9935-480c-bf2e-9109b6141006-hosts-file\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.952534 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.967267 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.974301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.974352 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.974361 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.974379 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:42 crc kubenswrapper[5021]: I0121 15:24:42.974390 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:42Z","lastTransitionTime":"2026-01-21T15:24:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.048757 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/78b7c66b-9935-480c-bf2e-9109b6141006-hosts-file\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.048839 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t65tk\" (UniqueName: \"kubernetes.io/projected/78b7c66b-9935-480c-bf2e-9109b6141006-kube-api-access-t65tk\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.048984 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/78b7c66b-9935-480c-bf2e-9109b6141006-hosts-file\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079060 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t65tk\" (UniqueName: \"kubernetes.io/projected/78b7c66b-9935-480c-bf2e-9109b6141006-kube-api-access-t65tk\") pod \"node-resolver-vg9bt\" (UID: \"78b7c66b-9935-480c-bf2e-9109b6141006\") " pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079572 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079607 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079631 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.079642 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.138712 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-vg9bt" Jan 21 15:24:43 crc kubenswrapper[5021]: W0121 15:24:43.153711 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78b7c66b_9935_480c_bf2e_9109b6141006.slice/crio-86b10736ace193c68a7af5114e886de20ae00e365e69f4464a8c8a19441a2025 WatchSource:0}: Error finding container 86b10736ace193c68a7af5114e886de20ae00e365e69f4464a8c8a19441a2025: Status 404 returned error can't find the container with id 86b10736ace193c68a7af5114e886de20ae00e365e69f4464a8c8a19441a2025 Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.182168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.182202 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.182211 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.182224 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.182233 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.203456 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-sd7j9"] Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.203741 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.204258 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-k9hxg"] Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.205138 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.212087 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9flhm"] Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.213149 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.213176 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-n22xz"] Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.213434 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.213884 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.214173 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.214273 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.214485 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.214666 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.217443 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.217985 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.218103 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.220680 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.220756 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221065 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221109 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221129 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221323 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221881 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.221961 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.222079 5021 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.222378 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.222404 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.242976 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254203 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-system-cni-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254247 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254268 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-k8s-cni-cncf-io\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254293 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-etc-kubernetes\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254324 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254352 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-rootfs\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254382 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-proxy-tls\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254469 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-binary-copy\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254520 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254540 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254559 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k67c\" (UniqueName: \"kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254581 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-cnibin\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254598 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-hostroot\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254617 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-socket-dir-parent\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254670 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-bin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254725 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-os-release\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254751 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254770 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254793 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqh6z\" (UniqueName: \"kubernetes.io/projected/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-kube-api-access-pqh6z\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254819 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhz8\" (UniqueName: \"kubernetes.io/projected/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-kube-api-access-9rhz8\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254865 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cni-binary-copy\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254883 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-conf-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254901 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nk6v\" (UniqueName: \"kubernetes.io/projected/ddf892f9-a048-4335-995e-de581763d230-kube-api-access-6nk6v\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254945 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns\") pod \"ovnkube-node-9flhm\" 
(UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254970 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-system-cni-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.254994 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255018 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-multus\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255042 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255065 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255108 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-kubelet\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255155 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255179 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255224 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255306 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-cni-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255349 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cnibin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255384 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255435 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255481 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-mcd-auth-proxy-config\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255502 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255518 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255552 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255568 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-netns\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255590 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-multus-certs\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255613 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-tuning-conf-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255669 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-os-release\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255690 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255707 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-daemon-config\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.255726 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.267625 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.281325 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.287309 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.287345 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.287359 5021 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.287383 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.287400 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.293110 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.311718 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.341170 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357024 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-multus\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357066 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-system-cni-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357091 
5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357114 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-kubelet\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357136 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357158 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357155 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-multus\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357181 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357202 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357232 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357234 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357252 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357290 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357273 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357294 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-kubelet\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357343 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357282 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357318 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357403 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-cni-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357436 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cnibin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357455 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: 
\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357474 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-mcd-auth-proxy-config\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357491 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357509 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357537 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cnibin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357566 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357361 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357528 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357641 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-cni-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357662 5021 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-netns\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357687 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-multus-certs\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357694 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-netns\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357708 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-tuning-conf-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357734 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-os-release\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357735 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-multus-certs\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357753 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357778 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-daemon-config\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357799 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357822 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-system-cni-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357840 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357861 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-rootfs\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357888 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-k8s-cni-cncf-io\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357924 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-etc-kubernetes\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357942 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357963 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-proxy-tls\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357991 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-cnibin\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358011 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-binary-copy\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358034 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet\") pod 
\"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358058 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358061 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-tuning-conf-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358079 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k67c\" (UniqueName: \"kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358116 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-run-k8s-cni-cncf-io\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358126 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-socket-dir-parent\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358148 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-bin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358172 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-hostroot\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358177 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-os-release\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358198 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-os-release\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " 
pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358213 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358221 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358259 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358287 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqh6z\" (UniqueName: \"kubernetes.io/projected/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-kube-api-access-pqh6z\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358301 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358328 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhz8\" (UniqueName: \"kubernetes.io/projected/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-kube-api-access-9rhz8\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358348 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358362 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cni-binary-copy\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358410 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-conf-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc 
kubenswrapper[5021]: I0121 15:24:43.358447 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nk6v\" (UniqueName: \"kubernetes.io/projected/ddf892f9-a048-4335-995e-de581763d230-kube-api-access-6nk6v\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358464 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358545 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-conf-dir\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358576 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-mcd-auth-proxy-config\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358601 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-etc-kubernetes\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358661 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358711 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-socket-dir-parent\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.358976 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-cnibin\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.357196 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-system-cni-dir\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359014 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359042 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359052 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-hostroot\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359057 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-host-var-lib-cni-bin\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359110 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-multus-daemon-config\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359161 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/ddf892f9-a048-4335-995e-de581763d230-os-release\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359180 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359204 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-rootfs\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359208 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-system-cni-dir\") pod \"multus-sd7j9\" (UID: 
\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.359714 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-binary-copy\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.360686 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-cni-binary-copy\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.362116 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.366055 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.366863 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-proxy-tls\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.367267 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.367958 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/ddf892f9-a048-4335-995e-de581763d230-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.380950 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-21 15:19:42 +0000 UTC, rotation deadline is 2026-10-13 10:25:14.168260324 +0000 UTC Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.381011 5021 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6355h0m30.787251921s for next certificate rotation Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.381389 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhz8\" (UniqueName: \"kubernetes.io/projected/49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a-kube-api-access-9rhz8\") pod \"multus-sd7j9\" (UID: \"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\") " pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.383609 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nk6v\" (UniqueName: \"kubernetes.io/projected/ddf892f9-a048-4335-995e-de581763d230-kube-api-access-6nk6v\") pod \"multus-additional-cni-plugins-k9hxg\" (UID: \"ddf892f9-a048-4335-995e-de581763d230\") " pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.383854 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqh6z\" (UniqueName: \"kubernetes.io/projected/d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1-kube-api-access-pqh6z\") pod \"machine-config-daemon-n22xz\" (UID: \"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\") " pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.384343 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k67c\" (UniqueName: \"kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c\") pod \"ovnkube-node-9flhm\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.384474 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.389356 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.389401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.389409 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.389424 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.389436 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.409075 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.422747 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.440747 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.460206 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.470665 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.482767 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.492495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.492539 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.492553 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.492569 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.492581 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.493297 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.505278 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.516953 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.518060 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-sd7j9" Jan 21 15:24:43 crc kubenswrapper[5021]: W0121 15:24:43.527864 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49b7ceaa_55d0_4bb2_8ff2_ee9da865da0a.slice/crio-10208da9ed56c83d4bb9d5049feba2a45917e3e450d346655ed736b702048638 WatchSource:0}: Error finding container 10208da9ed56c83d4bb9d5049feba2a45917e3e450d346655ed736b702048638: Status 404 returned error can't find the container with id 10208da9ed56c83d4bb9d5049feba2a45917e3e450d346655ed736b702048638 Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.529899 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mount
Path\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.531663 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.543007 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.544481 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\
"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: W0121 15:24:43.549028 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podddf892f9_a048_4335_995e_de581763d230.slice/crio-34c3407fd4128e80aa6125fe5e02e66914651451a0f1e971e41df32a0377d43f WatchSource:0}: Error finding container 34c3407fd4128e80aa6125fe5e02e66914651451a0f1e971e41df32a0377d43f: Status 404 returned error can't find the container with id 34c3407fd4128e80aa6125fe5e02e66914651451a0f1e971e41df32a0377d43f Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.553766 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.567041 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.582616 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.595462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.595492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.595500 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.595514 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.595523 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.596068 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.606340 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.693637 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 22:13:22.833499506 +0000 UTC Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.697422 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.697456 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.697465 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.697511 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.697524 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.800529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.800567 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.800576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.800590 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.800598 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.852700 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerStarted","Data":"ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.852752 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerStarted","Data":"10208da9ed56c83d4bb9d5049feba2a45917e3e450d346655ed736b702048638"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.854874 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.854944 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.854959 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"4abb47dc1287b71bb112ed0da8902d07f230cb9203290c32d57e9e6005c38e34"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.856663 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be" exitCode=0 Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.856704 5021 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.856722 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"6712a107b7189696d54af4eb09f71e34ffcfe2dc4b07e1763c6b232a677bc1d1"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.858737 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerStarted","Data":"a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.858792 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerStarted","Data":"34c3407fd4128e80aa6125fe5e02e66914651451a0f1e971e41df32a0377d43f"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.862636 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vg9bt" event={"ID":"78b7c66b-9935-480c-bf2e-9109b6141006","Type":"ContainerStarted","Data":"e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.862695 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vg9bt" event={"ID":"78b7c66b-9935-480c-bf2e-9109b6141006","Type":"ContainerStarted","Data":"86b10736ace193c68a7af5114e886de20ae00e365e69f4464a8c8a19441a2025"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.874462 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.890491 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.903425 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.903474 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.903485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.903506 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.903517 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:43Z","lastTransitionTime":"2026-01-21T15:24:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.906663 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.919996 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.936410 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:43 crc kubenswrapper[5021]: I0121 15:24:43.985037 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:43Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.006501 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.006535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.006544 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.006558 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.006567 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.013127 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.050355 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.070606 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.083585 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.095802 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.108078 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.109346 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.109483 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.109560 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.109636 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.109715 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.120420 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.132595 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.150143 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z 
is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.163932 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.181032 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.191579 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.202923 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-cont
roller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.212931 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.212989 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.213003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.213023 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.213037 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.217376 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.229240 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.244103 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.257497 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.271321 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.282028 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.294265 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.315331 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.315433 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.315501 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.315565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.315631 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.418282 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.418342 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.418358 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.418384 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.418400 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.474173 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.474291 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.474450 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.474451 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:24:52.474407656 +0000 UTC m=+34.009521545 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.474526 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:52.474504558 +0000 UTC m=+34.009618447 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.521144 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.521608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.521622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.521643 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.521658 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.575565 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.575725 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.576179 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.576284 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.575769 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.575828 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 
15:24:44.576520 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.576539 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.576733 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:52.576475406 +0000 UTC m=+34.111589295 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.576876 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:52.576863757 +0000 UTC m=+34.111977646 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.577016 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.577114 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.577177 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:52.577159675 +0000 UTC m=+34.112273564 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.623695 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.623735 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.623745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.623761 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.623771 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.693952 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 08:57:14.644627382 +0000 UTC Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.726109 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.726145 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.726164 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.726181 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.726190 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.738086 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.738172 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.738231 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.738272 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.738401 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:44 crc kubenswrapper[5021]: E0121 15:24:44.738454 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.828746 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.828787 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.828798 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.828815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.828830 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.869014 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.869075 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.869089 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.869100 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.869109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.871518 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796" exitCode=0 Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.871553 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.886470 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.905588 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z 
is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.925137 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.932052 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.932103 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.932118 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.932142 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.932157 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:44Z","lastTransitionTime":"2026-01-21T15:24:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file 
in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.941726 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mou
ntPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.956530 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.970345 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.985560 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:44 crc kubenswrapper[5021]: I0121 15:24:44.998341 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:44Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.013814 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.029004 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.035428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.035462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.035471 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.035485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.035493 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.044466 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.059617 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.073562 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.137744 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.137786 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.137813 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.137829 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.137839 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.242948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.242985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.242994 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.243008 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.243019 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.345270 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.345304 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.345314 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.345329 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.345339 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.438106 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-dq2bd"] Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.438581 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-dq2bd" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.440399 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.440672 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.440793 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.441343 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.447803 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.447840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.447851 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.447867 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.447878 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.454022 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.466876 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.478678 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.486442 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/67cee991-0227-45a5-bb2d-226481f03fd1-serviceca\") pod \"node-ca-dq2bd\" (UID: 
\"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.486498 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fmw9\" (UniqueName: \"kubernetes.io/projected/67cee991-0227-45a5-bb2d-226481f03fd1-kube-api-access-8fmw9\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.486531 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/67cee991-0227-45a5-bb2d-226481f03fd1-host\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.490522 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.502404 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.513256 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.521660 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.533343 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.550085 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.550122 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.550131 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.550146 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.550156 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.552787 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.569577 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z 
is after 2025-08-24T17:21:41Z"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.579110 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.587719 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/67cee991-0227-45a5-bb2d-226481f03fd1-serviceca\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.587752 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fmw9\" (UniqueName: \"kubernetes.io/projected/67cee991-0227-45a5-bb2d-226481f03fd1-kube-api-access-8fmw9\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.587779 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/67cee991-0227-45a5-bb2d-226481f03fd1-host\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.587840 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/67cee991-0227-45a5-bb2d-226481f03fd1-host\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.588577 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/67cee991-0227-45a5-bb2d-226481f03fd1-serviceca\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.591600 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.606741 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fmw9\" (UniqueName: \"kubernetes.io/projected/67cee991-0227-45a5-bb2d-226481f03fd1-kube-api-access-8fmw9\") pod \"node-ca-dq2bd\" (UID: \"67cee991-0227-45a5-bb2d-226481f03fd1\") " pod="openshift-image-registry/node-ca-dq2bd" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.607875 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc 
kubenswrapper[5021]: I0121 15:24:45.620245 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.652395 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.652450 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.652463 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.652482 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.652494 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.695006 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 00:47:53.120101235 +0000 UTC
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.751629 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-dq2bd"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.755402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.755445 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.755456 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.755474 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.755483 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:45 crc kubenswrapper[5021]: W0121 15:24:45.764934 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67cee991_0227_45a5_bb2d_226481f03fd1.slice/crio-b64e57b456b89fc8fc93cd1526199a273fdfeaf9203ab327fb8948cd17de16e1 WatchSource:0}: Error finding container b64e57b456b89fc8fc93cd1526199a273fdfeaf9203ab327fb8948cd17de16e1: Status 404 returned error can't find the container with id b64e57b456b89fc8fc93cd1526199a273fdfeaf9203ab327fb8948cd17de16e1
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.859195 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.859242 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.859256 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.859275 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.859291 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.879449 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee"}
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.881828 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-dq2bd" event={"ID":"67cee991-0227-45a5-bb2d-226481f03fd1","Type":"ContainerStarted","Data":"b64e57b456b89fc8fc93cd1526199a273fdfeaf9203ab327fb8948cd17de16e1"}
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.883887 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c" exitCode=0
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.883967 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c"}
Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.894783 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.908482 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.925938 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z 
is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.937613 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.951751 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.965060 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.965130 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.965140 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.965183 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.965193 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:45Z","lastTransitionTime":"2026-01-21T15:24:45Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.967226 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\"
,\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9810067
4616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.979681 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:45 crc kubenswrapper[5021]: I0121 15:24:45.990766 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:45Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.003174 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.015242 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.026775 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.041341 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.057112 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.067616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.067674 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.067688 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.067707 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.067719 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.071974 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.169936 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.169974 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.169986 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.170003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.170014 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.272407 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.272457 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.272470 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.272486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.272499 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.375398 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.375443 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.375454 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.375469 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.375478 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.478804 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.478856 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.478867 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.478885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.478902 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.581128 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.581168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.581189 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.581203 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.581213 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.684413 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.684454 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.684467 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.684483 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.684495 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.695813 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 15:36:24.794772617 +0000 UTC Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.737367 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.737439 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.737376 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:46 crc kubenswrapper[5021]: E0121 15:24:46.737606 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:46 crc kubenswrapper[5021]: E0121 15:24:46.737725 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:46 crc kubenswrapper[5021]: E0121 15:24:46.737798 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.786654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.786701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.786714 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.786735 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.786748 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.888638 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.888675 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.888691 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.888709 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.888721 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.889231 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155" exitCode=0 Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.889330 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.893892 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-dq2bd" event={"ID":"67cee991-0227-45a5-bb2d-226481f03fd1","Type":"ContainerStarted","Data":"45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.909021 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.925393 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.938430 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.961283 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.981489 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.991519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.991555 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.991565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.991578 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.991587 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:46Z","lastTransitionTime":"2026-01-21T15:24:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:46 crc kubenswrapper[5021]: I0121 15:24:46.995461 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:46Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.009241 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.021653 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.033071 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.054627 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.065633 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.079823 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.093657 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.093705 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.093719 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.093739 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.093755 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.097951 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.114861 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.132808 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.154578 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.165399 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.177646 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.191921 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.195835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.195875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.195887 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.195923 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.195940 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.203298 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.215871 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.229063 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.243640 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.254739 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.269341 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\
\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.284574 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.298807 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.299451 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.299484 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.299494 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.299510 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.299523 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.312077 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.402420 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 
15:24:47.402500 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.402524 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.402559 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.402580 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.505648 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.505681 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.505691 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.505706 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.505716 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.608586 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.608641 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.608654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.608675 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.608690 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
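The setters.go:603 entries show the exact condition object the kubelet writes onto the Node. Here is a standalone sketch that reproduces the JSON shape seen in the log; the struct mirrors only the fields visible here and is a stand-in, not the k8s.io/api NodeCondition type.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // nodeCondition mirrors the fields printed by setters.go above.
    type nodeCondition struct {
    	Type               string `json:"type"`
    	Status             string `json:"status"`
    	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
    	LastTransitionTime string `json:"lastTransitionTime"`
    	Reason             string `json:"reason"`
    	Message            string `json:"message"`
    }

    func main() {
    	now := time.Now().UTC().Format(time.RFC3339)
    	cond := nodeCondition{
    		Type:               "Ready",
    		Status:             "False",
    		LastHeartbeatTime:  now,
    		LastTransitionTime: now,
    		Reason:             "KubeletNotReady",
    		Message: "container runtime network not ready: NetworkReady=false " +
    			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
    			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
    			"Has your network provider started?",
    	}
    	out, _ := json.Marshal(cond)
    	fmt.Println(string(out))
    }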
Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.696899 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 07:51:12.073745301 +0000 UTC Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.711371 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.711403 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.711413 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.711427 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.711437 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.813410 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.813440 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.813449 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.813461 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.813470 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
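The certificate_manager.go line above reports a rotation deadline well before the certificate's expiry. client-go's certificate manager picks that deadline at a jittered point roughly 70-90% of the way through the certificate's lifetime; the sketch below illustrates that policy rather than the upstream code. Only the expiry date comes from the log; the issue date is a hypothetical one-year-earlier value chosen so the logged deadline falls inside the window.

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline sketches client-go's policy: rotate at a random
    // point between 70% and 90% of the certificate's total lifetime.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jitter := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jitter)
    }

    func main() {
    	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // hypothetical issue time
    	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // expiry from the log line
    	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
    }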
Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.899325 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f" exitCode=0 Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.899367 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.903872 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.913413 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\
\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.914901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.914956 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.914965 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.914980 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.914990 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:47Z","lastTransitionTime":"2026-01-21T15:24:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.930200 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.941495 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
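Each of these status_manager failures quotes the strategic-merge-patch body it tried to send; the $setElementOrder/conditions directive pins the ordering of the conditions list while the server merges the partial update. Below is a sketch that builds a patch of the same shape (the UID is copied from the iptables-alerter entry that follows; the field set is trimmed for brevity).

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// Strategic-merge-patch body of the same shape the status manager sends.
    	patch := map[string]any{
    		"metadata": map[string]any{"uid": "d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"},
    		"status": map[string]any{
    			// Pin the merge order of the conditions list.
    			"$setElementOrder/conditions": []map[string]string{
    				{"type": "PodReadyToStartContainers"},
    				{"type": "Initialized"},
    				{"type": "Ready"},
    				{"type": "ContainersReady"},
    				{"type": "PodScheduled"},
    			},
    			// Only the changed condition entries ride along in the patch.
    			"conditions": []map[string]any{
    				{"type": "Ready", "status": "True",
    					"lastTransitionTime": "2026-01-21T15:24:39Z"},
    			},
    		},
    	}
    	out, _ := json.Marshal(patch)
    	fmt.Println(string(out))
    }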
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.953177 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized 
nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.963948 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
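Every patch in this stretch dies on the same x509 error: the webhook's serving certificate expired on 2025-08-24, roughly five months before this log's clock. Here is a self-contained check that reports expiry the same way Go's TLS verifier phrases it; the certificate path is hypothetical.

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"log"
    	"os"
    	"time"
    )

    func main() {
    	// Hypothetical path; point it at the webhook's serving certificate.
    	data, err := os.ReadFile("webhook-serving.crt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	block, _ := pem.Decode(data)
    	if block == nil {
    		log.Fatal("no PEM block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		log.Fatal(err)
    	}
    	now := time.Now().UTC()
    	switch {
    	case now.After(cert.NotAfter):
    		// Same shape as the verifier error repeated in this log.
    		fmt.Printf("certificate has expired: current time %s is after %s\n",
    			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
    	case now.Before(cert.NotBefore):
    		fmt.Println("certificate is not yet valid")
    	default:
    		fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
    	}
    }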
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.975650 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.983991 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:47 crc kubenswrapper[5021]: I0121 15:24:47.995185 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:47Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.005846 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.031487 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.031528 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.031541 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.031559 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.031568 5021 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.029654 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z 
is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.045240 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{...}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.056317 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{...}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.068997 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{...}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.081953 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{...}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.133600 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.133641 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.133650 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.133667 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.133678 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.236962 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.236997 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.237009 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.237023 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.237033 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.319253 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.330997 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{...}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.338772 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.338816 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.338826 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.338840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.338849 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.344004 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{...}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.354385 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{...}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.365734 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{...}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.375968 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{...}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.387189 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{...}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.395586 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{...}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.405553 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{...}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.418785 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{...}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.429929 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{...}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.440696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.440738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.440749 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.440766 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.440778 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.442757 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.455161 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.471672 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.481664 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.543328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.543366 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.543375 5021 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.543389 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.543397 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.597547 5021 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.645478 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.645517 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.645527 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.645541 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.645550 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.697485 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 03:50:44.865249353 +0000 UTC Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.737177 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.737247 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:48 crc kubenswrapper[5021]: E0121 15:24:48.737491 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:48 crc kubenswrapper[5021]: E0121 15:24:48.737640 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.737673 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:48 crc kubenswrapper[5021]: E0121 15:24:48.737776 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.748478 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.748531 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.748543 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.748562 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.748574 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.751204 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.764501 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.775148 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.783331 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.796734 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.808218 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.828339 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.837175 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.847316 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.850075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.850116 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.850131 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.850148 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.850160 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.860456 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:
24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.870646 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.881580 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24
c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.891591 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.902219 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.912169 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25" exitCode=0 Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.912212 
5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.926661 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"st
ate\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.937900 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.948557 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.952371 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.952398 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.952408 5021 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.952421 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.952431 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:48Z","lastTransitionTime":"2026-01-21T15:24:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.957207 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.970402 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.981501 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:48 crc kubenswrapper[5021]: I0121 15:24:48.996422 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.005105 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.016036 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.028421 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.039425 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.051966 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24
c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.055220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.055271 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.055282 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.055298 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.055307 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.062997 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.072271 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.158626 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.159037 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.159051 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.159067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.159077 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.261412 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.261481 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.261492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.261507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.261517 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.363778 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.363835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.363846 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.363875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.363886 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.466336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.466392 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.466406 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.466444 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.466455 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.568673 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.568712 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.568721 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.568736 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.568783 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.671480 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.671513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.671521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.671534 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.671543 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.697882 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 01:32:32.927755839 +0000 UTC
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.699351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.699401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.699413 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.699428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.699440 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.714419 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 
2025-08-24T17:21:41Z"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.718169 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.718221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.718234 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.718253 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.718265 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.730758 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 
2025-08-24T17:21:41Z"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.734291 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.734307 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.734318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.734329 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.734340 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.745447 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 
2025-08-24T17:21:41Z"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.749231 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.749272 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.749285 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.749303 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.749315 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.761864 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 
2025-08-24T17:21:41Z"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.770029 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.770064 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.770073 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.770088 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.770098 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.782060 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 
2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: E0121 15:24:49.782171 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.783291 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.783312 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.783320 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.783332 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.783341 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.887521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.887572 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.887584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.887603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.887617 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.919869 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddf892f9-a048-4335-995e-de581763d230" containerID="34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4" exitCode=0 Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.919949 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerDied","Data":"34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.924217 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009"} Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.924565 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.937510 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.949093 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.954927 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.968127 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.985600 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.991353 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.991393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.991406 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.991421 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:49 crc kubenswrapper[5021]: I0121 15:24:49.991432 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:49Z","lastTransitionTime":"2026-01-21T15:24:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.001837 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.016393 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.030800 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.047390 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.060677 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.074470 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.090741 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.093793 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.093832 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.093841 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.093856 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.093867 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.104766 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.124688 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z 
is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.136987 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.150075 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.165003 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.190705 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.196356 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.196432 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.196456 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.196486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.196506 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.205481 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.222029 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.237369 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.259760 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready 
status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b29427714
32ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.271758 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.288280 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.298353 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.298394 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.298404 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.298418 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.298428 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.306832 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56
f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.319295 5021 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 
21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.334605 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.349873 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.362148 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.401885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.402237 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.402254 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.402274 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.402288 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.504701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.504743 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.504753 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.504770 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.504782 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.607149 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.607234 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.607253 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.607277 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.607293 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.698318 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 11:18:43.58970136 +0000 UTC
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.710877 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.710936 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.710953 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.710971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.710982 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.737574 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.737632 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.737620 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:24:50 crc kubenswrapper[5021]: E0121 15:24:50.737964 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:24:50 crc kubenswrapper[5021]: E0121 15:24:50.738062 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:24:50 crc kubenswrapper[5021]: E0121 15:24:50.738160 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.814899 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.815011 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.815031 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.815063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.815083 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.917500 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.917862 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.918108 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.918197 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.918260 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:50Z","lastTransitionTime":"2026-01-21T15:24:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.934649 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" event={"ID":"ddf892f9-a048-4335-995e-de581763d230","Type":"ContainerStarted","Data":"d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b"}
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.935244 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.935459 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm"
Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.953375 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.955709 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.968582 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":
\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.980975 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"
ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:50 crc kubenswrapper[5021]: I0121 15:24:50.994844 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\
\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:50Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.007951 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.020876 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.020960 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.020971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.020988 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.021000 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.023140 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.040770 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.054462 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.067057 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.080344 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.096721 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.114471 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.124733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.124791 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.124803 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.124826 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.124840 5021 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.137213 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\
\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-ope
nvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" 
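
Every "Failed to update status for pod" entry above shares one root cause: the kube-apiserver's admission call to the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 fails TLS verification because the webhook's serving certificate expired on 2025-08-24T17:21:41Z, well before the node clock of 2026-01-21; the kubelet merely surfaces the resulting "Internal error occurred" on each status patch. The sketch below is illustrative only (it is neither kubelet nor apiserver code, and it assumes the webhook port is reachable from the node); it fetches that serving certificate and reruns the same validity-window comparison that Go's crypto/x509 performs, which is what formats the "current time X is after Y" detail string seen in these entries.

    // expirycheck.go: minimal sketch reproducing the x509 validity-window
    // check behind the recurring errors in this log. Not production code;
    // the endpoint matches the webhook address logged above, everything
    // else is an illustrative assumption.
    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        // Fetch the serving certificate as a TLS client would; skip chain
        // verification so the handshake succeeds even though the cert is
        // expired and we can still inspect it.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743",
            &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        defer conn.Close()

        cert := conn.ConnectionState().PeerCertificates[0]
        now := time.Now()

        // The same NotBefore/NotAfter window check crypto/x509 applies
        // during verification; on failure it reports the "current time X
        // is after Y" detail quoted throughout this log.
        switch {
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %s is before %s\n",
                now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %s is after %s\n",
                now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
        default:
            fmt.Printf("valid until %s\n", cert.NotAfter.Format(time.RFC3339))
        }
    }

Until that certificate is rotated (or the clock corrected), every status patch intercepted by this webhook will keep failing identically; the interleaved NodeNotReady transitions are a separate symptom, recorded because no CNI configuration file has yet been written to /etc/kubernetes/cni/net.d/.
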
Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.155976 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.199239 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.221572 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.226817 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.227078 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.227176 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.227270 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.227348 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.248499 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.271699 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.299708 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.316397 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.328876 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.330298 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.330351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.330370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.330393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.330408 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.343809 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.358649 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.374957 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.388485 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.400874 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.415595 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.431469 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:51Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.433200 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.433271 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.433290 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.433312 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.433326 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.536044 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.536094 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.536105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.536125 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.536137 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.639157 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.639200 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.639209 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.639227 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.639239 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.699861 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 02:58:37.385179159 +0000 UTC Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.744351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.744402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.744413 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.744433 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.744447 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.847445 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.847496 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.847507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.847531 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.847545 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.950047 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.950122 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.950158 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.950181 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:51 crc kubenswrapper[5021]: I0121 15:24:51.950192 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:51Z","lastTransitionTime":"2026-01-21T15:24:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.053797 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.053875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.053888 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.053955 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.053972 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.157193 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.157243 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.157255 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.157275 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.157290 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.261677 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.261751 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.261764 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.261783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.261795 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.364853 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.364937 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.364961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.364989 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.365001 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.468486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.468547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.468564 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.468587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.468604 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.556387 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.556657 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:25:08.556616197 +0000 UTC m=+50.091730106 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.556710 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.556864 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.556957 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:08.556938316 +0000 UTC m=+50.092052205 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.571602 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.571660 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.571679 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.571703 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.571720 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.658233 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.658278 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.658298 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658390 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658435 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:08.6584224 +0000 UTC m=+50.193536289 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658436 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658465 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658476 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658522 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:08.658507503 +0000 UTC m=+50.193621392 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658436 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658541 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658547 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.658567 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:08.658560804 +0000 UTC m=+50.193674693 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.674717 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.674780 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.674797 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.674821 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.674834 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.701070 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 10:38:41.145203325 +0000 UTC Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.737334 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.737334 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.737476 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.737344 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.737597 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:52 crc kubenswrapper[5021]: E0121 15:24:52.737630 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.781239 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.781301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.781318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.781338 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.781351 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.883345 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.883398 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.883409 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.883432 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.883447 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.943777 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/0.log" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.947618 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009" exitCode=1 Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.947698 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.949117 5021 scope.go:117] "RemoveContainer" containerID="f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.968564 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.986517 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.986889 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.986978 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.986992 5021 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.987021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.987036 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:52Z","lastTransitionTime":"2026-01-21T15:24:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:52 crc kubenswrapper[5021]: I0121 15:24:52.997995 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.017783 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.035356 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.059804 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:52Z\\\",\\\"message\\\":\\\"pis/informers/externalversions/factory.go:140\\\\nI0121 15:24:51.815417 6335 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0121 15:24:51.816126 6335 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 15:24:51.816175 6335 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 15:24:51.816213 6335 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:51.816220 6335 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:51.816236 6335 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:51.816241 6335 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:51.816257 6335 factory.go:656] Stopping watch factory\\\\nI0121 15:24:51.816276 6335 ovnkube.go:599] Stopped ovnkube\\\\nI0121 15:24:51.816277 6335 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 15:24:51.816308 6335 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:51.816312 6335 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.074803 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.089436 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.090877 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.090958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.090972 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.090990 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.091001 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.108792 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.124840 5021 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 
15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.138689 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.151840 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.170808 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.187405 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.193942 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.194012 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.194025 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.194047 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.194059 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.297695 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.297750 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.297763 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.297782 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.297794 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.401370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.401436 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.401448 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.401471 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.401488 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.503721 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.503760 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.503770 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.503798 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.503807 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.606816 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.606872 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.606884 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.606917 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.606931 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.702080 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 02:37:47.490588417 +0000 UTC Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.709010 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.709049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.709060 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.709076 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.709089 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.811328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.811370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.811380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.811395 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.811408 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.914116 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.914154 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.914163 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.914177 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.914187 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:53Z","lastTransitionTime":"2026-01-21T15:24:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.953744 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/0.log" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.957793 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107"} Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.958309 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.972937 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:53 crc kubenswrapper[5021]: I0121 15:24:53.990132 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:53Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.005895 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.015983 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.016057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc 
kubenswrapper[5021]: I0121 15:24:54.016072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.016094 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.016109 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.020480 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"con
tainerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.036195 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.049244 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.064224 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.073879 
5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.086810 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.100176 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.113927 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.118476 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.118527 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.118539 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.118555 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.118568 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.125641 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.139317 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.160156 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:52Z\\\",\\\"message\\\":\\\"pis/informers/externalversions/factory.go:140\\\\nI0121 15:24:51.815417 6335 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0121 15:24:51.816126 6335 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 15:24:51.816175 6335 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 15:24:51.816213 6335 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:51.816220 6335 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:51.816236 6335 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:51.816241 6335 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:51.816257 6335 factory.go:656] Stopping watch factory\\\\nI0121 15:24:51.816276 6335 ovnkube.go:599] Stopped ovnkube\\\\nI0121 15:24:51.816277 6335 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 15:24:51.816308 6335 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:51.816312 6335 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.221382 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.221462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.221475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.221498 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.221508 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.324535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.324576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.324586 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.324603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.324612 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.426892 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.426999 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.427029 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.427065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.427088 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.529822 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.529896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.529944 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.529971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.529991 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.632033 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.632081 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.632095 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.632113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.632128 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.703075 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 17:15:10.494874862 +0000 UTC Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.734133 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.734177 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.734186 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.734203 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.734216 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.737782 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.737866 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.737819 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:54 crc kubenswrapper[5021]: E0121 15:24:54.737990 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:54 crc kubenswrapper[5021]: E0121 15:24:54.738106 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:54 crc kubenswrapper[5021]: E0121 15:24:54.738185 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.837103 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.837139 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.837150 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.837162 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.837171 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.939489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.939544 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.939559 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.939580 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.939593 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:54Z","lastTransitionTime":"2026-01-21T15:24:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.963234 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/1.log" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.964014 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/0.log" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.967009 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107" exitCode=1 Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.967077 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107"} Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.967141 5021 scope.go:117] "RemoveContainer" containerID="f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.968191 5021 scope.go:117] "RemoveContainer" containerID="9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107" Jan 21 15:24:54 crc kubenswrapper[5021]: E0121 15:24:54.968418 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.983043 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:54 crc kubenswrapper[5021]: I0121 15:24:54.997824 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:54Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.015937 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.028493 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045566 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045648 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045665 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045689 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045706 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.045987 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.059940 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.082938 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:52Z\\\",\\\"message\\\":\\\"pis/informers/externalversions/factory.go:140\\\\nI0121 15:24:51.815417 6335 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0121 15:24:51.816126 6335 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 15:24:51.816175 6335 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 15:24:51.816213 6335 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:51.816220 6335 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:51.816236 6335 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:51.816241 6335 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:51.816257 6335 factory.go:656] Stopping watch factory\\\\nI0121 15:24:51.816276 6335 ovnkube.go:599] Stopped ovnkube\\\\nI0121 15:24:51.816277 6335 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 15:24:51.816308 6335 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:51.816312 6335 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed 
*v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\"
:\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.096995 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.111555 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.131186 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.144785 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.148897 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.148950 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.148961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.148977 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.148990 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.159249 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.173204 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.190628 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.252616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.252689 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.252712 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.253167 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.253231 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.354616 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt"] Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.355605 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.355654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.355672 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.355701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.355719 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.356269 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.358197 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.358565 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.370898 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-opera
tor@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.383636 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.398583 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.417084 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.429643 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.445503 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.459532 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.459958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.460090 5021 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.460214 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.460329 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.462103 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.478268 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.489007 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.489308 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.489476 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgbzj\" (UniqueName: \"kubernetes.io/projected/2265b990-9ae6-48a6-b93e-c91bf08a41f1-kube-api-access-wgbzj\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.489588 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.495400 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.511804 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.532888 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f927f91074ca4f0b4b2942771432ed17cf3a16564d3324a0f9c2294905eaa009\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:52Z\\\",\\\"message\\\":\\\"pis/informers/externalversions/factory.go:140\\\\nI0121 15:24:51.815417 6335 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0121 15:24:51.816126 6335 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0121 15:24:51.816175 6335 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0121 15:24:51.816213 6335 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:51.816220 6335 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:51.816236 6335 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:51.816241 6335 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:51.816257 6335 factory.go:656] Stopping watch factory\\\\nI0121 15:24:51.816276 6335 ovnkube.go:599] Stopped ovnkube\\\\nI0121 15:24:51.816277 6335 handler.go:208] Removed *v1.Node event handler 2\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:51.816298 6335 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0121 15:24:51.816308 6335 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:51.816312 6335 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed 
*v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\"
:\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.545486 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.559837 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.563546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.563589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.563600 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.563622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.563632 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.575257 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.590949 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.591015 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.591047 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.591107 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgbzj\" (UniqueName: \"kubernetes.io/projected/2265b990-9ae6-48a6-b93e-c91bf08a41f1-kube-api-access-wgbzj\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.591953 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.592340 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2265b990-9ae6-48a6-b93e-c91bf08a41f1-env-overrides\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.593021 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.597888 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2265b990-9ae6-48a6-b93e-c91bf08a41f1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.607715 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgbzj\" (UniqueName: \"kubernetes.io/projected/2265b990-9ae6-48a6-b93e-c91bf08a41f1-kube-api-access-wgbzj\") pod \"ovnkube-control-plane-749d76644c-597dt\" (UID: \"2265b990-9ae6-48a6-b93e-c91bf08a41f1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.666667 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.666722 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.666733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.666756 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.666769 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.668108 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" Jan 21 15:24:55 crc kubenswrapper[5021]: W0121 15:24:55.684084 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2265b990_9ae6_48a6_b93e_c91bf08a41f1.slice/crio-79baf4dc969d8394816dcb88e04636f977fe583925740a15a7aa41b77e1e827d WatchSource:0}: Error finding container 79baf4dc969d8394816dcb88e04636f977fe583925740a15a7aa41b77e1e827d: Status 404 returned error can't find the container with id 79baf4dc969d8394816dcb88e04636f977fe583925740a15a7aa41b77e1e827d Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.703633 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 17:45:16.347251396 +0000 UTC Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.769489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.769870 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.769988 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.770545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.770646 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.872849 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.872892 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.872924 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.872947 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.872962 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.972756 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/1.log" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.975836 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.975885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.975895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.975929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.975941 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:55Z","lastTransitionTime":"2026-01-21T15:24:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.977829 5021 scope.go:117] "RemoveContainer" containerID="9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107" Jan 21 15:24:55 crc kubenswrapper[5021]: E0121 15:24:55.978013 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.980502 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" event={"ID":"2265b990-9ae6-48a6-b93e-c91bf08a41f1","Type":"ContainerStarted","Data":"96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.980553 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" event={"ID":"2265b990-9ae6-48a6-b93e-c91bf08a41f1","Type":"ContainerStarted","Data":"7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.980569 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" event={"ID":"2265b990-9ae6-48a6-b93e-c91bf08a41f1","Type":"ContainerStarted","Data":"79baf4dc969d8394816dcb88e04636f977fe583925740a15a7aa41b77e1e827d"} Jan 21 15:24:55 crc kubenswrapper[5021]: I0121 15:24:55.995194 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:55Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.013015 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.029017 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.049610 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.065201 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.078677 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.078726 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.078739 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.078759 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.078778 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.087533 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.105260 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.121638 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.138763 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.149499 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.165130 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.181846 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.181901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.181938 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.181964 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.181980 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.188407 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.201694 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.218294 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.235082 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.247640 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/r
ootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.260243 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.277795 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.284841 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.284871 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.284881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.284895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.284921 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.292650 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.305862 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.319726 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.333415 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.343169 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.355628 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.367789 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.380325 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.386579 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.386617 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.386628 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.386644 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.386655 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.391623 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.401698 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.414693 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.435304 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.490104 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.490142 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.490150 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.490166 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.490180 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.593459 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.593530 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.593542 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.593565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.593580 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.696734 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.697095 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.697217 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.697315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.697399 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.704049 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 23:05:01.506286114 +0000 UTC Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.737580 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:56 crc kubenswrapper[5021]: E0121 15:24:56.737982 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.737571 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:56 crc kubenswrapper[5021]: E0121 15:24:56.741752 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.741878 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:56 crc kubenswrapper[5021]: E0121 15:24:56.742192 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.800353 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.800393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.800405 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.800424 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.800436 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.857164 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-xtd2p"] Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.858361 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:56 crc kubenswrapper[5021]: E0121 15:24:56.858525 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.879424 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e4
5abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.900782 5021 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.903871 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.903937 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.903955 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.903978 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.903992 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:56Z","lastTransitionTime":"2026-01-21T15:24:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.917085 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.930131 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.946537 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:56 crc kubenswrapper[5021]: I0121 15:24:56.983226 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006371 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbnsh\" (UniqueName: \"kubernetes.io/projected/cb60592c-6770-457b-b2ae-2c6c8f2a4149-kube-api-access-xbnsh\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006476 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006623 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006656 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006666 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006680 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006691 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.006682 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:56Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.028267 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.042522 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.054117 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.068065 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.080820 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.096525 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: E0121 15:24:57.108215 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:57 crc kubenswrapper[5021]: E0121 15:24:57.108306 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:57.608285166 +0000 UTC m=+39.143399055 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.108065 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.108640 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbnsh\" (UniqueName: \"kubernetes.io/projected/cb60592c-6770-457b-b2ae-2c6c8f2a4149-kube-api-access-xbnsh\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.109446 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.109489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.109500 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.109518 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.109533 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.119843 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.130923 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbnsh\" (UniqueName: \"kubernetes.io/projected/cb60592c-6770-457b-b2ae-2c6c8f2a4149-kube-api-access-xbnsh\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.132951 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.148126 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:57Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.212681 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.212744 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.212762 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.212783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.212796 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.316607 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.316683 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.316694 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.316711 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.316721 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.421072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.421125 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.421136 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.421160 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.421174 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.525509 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.525601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.525673 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.525715 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.525747 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.615965 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:57 crc kubenswrapper[5021]: E0121 15:24:57.616250 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:57 crc kubenswrapper[5021]: E0121 15:24:57.616372 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:24:58.616339006 +0000 UTC m=+40.151452935 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.628660 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.628736 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.628761 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.628794 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.628818 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.704802 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 20:26:01.005538913 +0000 UTC Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.732638 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.732701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.732719 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.732745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.732763 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.835992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.836065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.836084 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.836112 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.836131 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.940446 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.940545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.940584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.940621 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:57 crc kubenswrapper[5021]: I0121 15:24:57.940644 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:57Z","lastTransitionTime":"2026-01-21T15:24:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.043694 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.043758 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.043777 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.043800 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.043814 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.147408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.147471 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.147486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.147503 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.147523 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.251593 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.251661 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.251680 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.251701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.251720 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.354376 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.354431 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.354447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.354468 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.354481 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.456946 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.457822 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.457840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.458060 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.458080 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.560891 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.560991 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.561003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.561026 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.561042 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.628558 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.629001 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.629070 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:00.629052123 +0000 UTC m=+42.164166012 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.663731 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.663773 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.663783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.663799 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.663813 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.705394 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 02:28:16.34388953 +0000 UTC Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.737153 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.737154 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.737251 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.737289 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.737470 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.737661 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.737812 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:24:58 crc kubenswrapper[5021]: E0121 15:24:58.737937 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.754564 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:2
4:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.768036 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.768115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.768133 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.768155 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.768188 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.772639 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.790540 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.807360 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.824541 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.843788 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.860357 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.872037 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.872081 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.872092 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.872118 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.872131 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.877893 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.899791 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.921815 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.937581 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.953113 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.975761 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.975805 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.975815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.975833 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.975843 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:58Z","lastTransitionTime":"2026-01-21T15:24:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.976417 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:58 crc kubenswrapper[5021]: I0121 15:24:58.991004 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:58Z is after 2025-08-24T17:21:41Z" Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.004805 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:59Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.020551 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:24:59Z is after 2025-08-24T17:21:41Z"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.081900 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.082034 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.082049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.082302 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.082323 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.185691 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.185728 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.185737 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.185750 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.185761 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.289017 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.289065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.289077 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.289100 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.289117 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.391992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.392051 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.392066 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.392086 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.392099 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.494828 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.494866 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.494878 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.494895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.494929 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.597705 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.597746 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.597758 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.597774 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.597786 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.700792 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.700857 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.700875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.700957 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.700976 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.705933 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 12:49:23.339623543 +0000 UTC
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.803694 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.803741 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.803752 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.803771 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.803782 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.906987 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.907099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.907115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.907142 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:24:59 crc kubenswrapper[5021]: I0121 15:24:59.907157 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:24:59Z","lastTransitionTime":"2026-01-21T15:24:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.009602 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.009648 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.009660 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.009677 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.009691 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.048233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.048279 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.048291 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.048307 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.048320 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.062528 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:00Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.066701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.066735 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.066745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.066762 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.066774 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.078548 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:00Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.083969 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.084032 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.084041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.084061 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.084072 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.103348 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:00Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.108688 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.108734 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.108748 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.108766 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.108778 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.121093 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{…}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:00Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.125868 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.125931 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.125948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.125966 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.125980 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.140011 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:00Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.140243 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.142497 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.142535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.142547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.142587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.142600 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.245492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.245576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.245595 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.245626 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.245646 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.348363 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.348407 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.348420 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.348438 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.348450 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.453005 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.453063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.453076 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.453096 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.453111 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.556156 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.556235 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.556248 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.556272 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.556286 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.649577 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.649731 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.649827 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:04.649805599 +0000 UTC m=+46.184919488 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.659118 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.659180 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.659194 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.659218 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.659234 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.707074 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 06:07:29.388397111 +0000 UTC
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.737694 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.737743 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.737714 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.737855 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.737898 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.738044 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.738218 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:00 crc kubenswrapper[5021]: E0121 15:25:00.738290 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.762292 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.762343 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.762354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.762374 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.762386 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.865555 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.865900 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.865930 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.865951 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.865963 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.968474 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.968552 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.968565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.968589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:00 crc kubenswrapper[5021]: I0121 15:25:00.968603 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:00Z","lastTransitionTime":"2026-01-21T15:25:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.070977 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.071028 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.071040 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.071058 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.071074 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.174265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.174326 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.174336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.174359 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.174371 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.277261 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.277340 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.277363 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.277399 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.277421 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.380072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.380130 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.380141 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.380158 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.380170 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.483098 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.483146 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.483160 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.483182 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.483193 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.588222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.588301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.588319 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.588350 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.588371 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.692182 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.692233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.692244 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.692269 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.692285 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.708155 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 19:23:26.54712095 +0000 UTC Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.795159 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.795195 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.795206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.795222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.795235 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.898265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.898322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.898336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.898357 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:01 crc kubenswrapper[5021]: I0121 15:25:01.898369 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:01Z","lastTransitionTime":"2026-01-21T15:25:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.002158 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.002221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.002239 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.002264 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.002281 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.104995 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.105075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.105099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.105132 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.105156 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.209354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.209453 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.209473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.209503 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.209523 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.312497 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.312552 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.312565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.312587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.312603 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.415201 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.415278 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.415294 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.415310 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.415323 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.518554 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.518644 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.518669 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.518707 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.518735 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.622251 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.622299 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.622315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.622339 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.622356 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.708264 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 20:43:21.081871631 +0000 UTC
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.724929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.724962 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.724972 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.724986 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.724997 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.737444 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.737552 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:02 crc kubenswrapper[5021]: E0121 15:25:02.737752 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.737815 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.737979 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:02 crc kubenswrapper[5021]: E0121 15:25:02.738075 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:02 crc kubenswrapper[5021]: E0121 15:25:02.738259 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:02 crc kubenswrapper[5021]: E0121 15:25:02.738428 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.828145 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.828215 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.828230 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.828248 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.828262 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.930584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.930662 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.930676 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.930700 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:02 crc kubenswrapper[5021]: I0121 15:25:02.930713 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:02Z","lastTransitionTime":"2026-01-21T15:25:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.033201 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.033269 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.033279 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.033295 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.033305 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.137078 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.137163 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.137187 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.137221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.137244 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.240498 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.240589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.240601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.240620 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.240665 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.343368 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.343434 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.343447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.343468 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.343484 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.451595 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.451651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.451663 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.451682 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.451699 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.555148 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.555207 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.555221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.555244 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.555258 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.658082 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.658122 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.658131 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.658144 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.658153 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.708385 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 11:23:39.190849131 +0000 UTC
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.761129 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.761185 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.761197 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.761213 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.761224 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.863511 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.863547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.863557 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.863575 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.863585 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.966635 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.966727 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.966762 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.966802 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:03 crc kubenswrapper[5021]: I0121 15:25:03.966825 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:03Z","lastTransitionTime":"2026-01-21T15:25:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.070315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.070373 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.070388 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.070405 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.070418 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.173984 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.174048 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.174063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.174086 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.174101 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.189666 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.198352 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.209807 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.223767 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.241196 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.256133 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.272181 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.277328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.277367 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.277377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.277392 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.277401 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.286443 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.304098 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.332249 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.345407 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.362298 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.378644 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.380442 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.380521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.380546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.380580 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.380605 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.396325 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.412335 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.427016 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.441027 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.456616 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:04Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.483726 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.483769 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.483778 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.483796 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.483816 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.587009 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.587067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.587082 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.587101 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.587116 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.690116 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.690210 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.690233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.690274 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.690303 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.694657 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.694851 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.694999 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:12.69497831 +0000 UTC m=+54.230092279 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.709345 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 21:24:06.224049173 +0000 UTC Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.737305 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.737396 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.737492 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.737589 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.737623 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.737787 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.737955 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:04 crc kubenswrapper[5021]: E0121 15:25:04.738133 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.795322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.795413 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.795439 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.795471 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.795499 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.898799 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.898895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.898943 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.898970 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:04 crc kubenswrapper[5021]: I0121 15:25:04.898987 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:04Z","lastTransitionTime":"2026-01-21T15:25:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.001270 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.001323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.001335 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.001354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.001368 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.104065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.104115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.104127 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.104144 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.104157 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.207692 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.207755 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.207769 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.207792 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.207805 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.310394 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.310436 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.310447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.310465 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.310478 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.413456 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.413517 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.413534 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.413553 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.413566 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.516733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.516810 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.516829 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.516855 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.516870 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.619898 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.619984 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.620001 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.620025 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.620037 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.710165 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 09:59:42.901348208 +0000 UTC Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.723129 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.723279 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.723297 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.723321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.723337 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.825563 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.825599 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.825608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.825622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.825632 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.927958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.927985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.927992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.928004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:05 crc kubenswrapper[5021]: I0121 15:25:05.928013 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:05Z","lastTransitionTime":"2026-01-21T15:25:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.030207 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.030263 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.030272 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.030286 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.030295 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.132624 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.132672 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.132683 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.132699 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.132709 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.235136 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.235222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.235253 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.235282 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.235299 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.337467 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.337519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.337529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.337542 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.337551 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.439882 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.439948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.439960 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.439976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.439987 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.543160 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.543195 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.543222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.543237 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.543245 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.646253 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.646316 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.646335 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.646361 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
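The cycle above repeats roughly every 100 ms for as long as the CRI runtime reports NetworkReady=false: each pass of the kubelet's node-status update re-records the resource events (memory, disk, PID) and re-asserts the Ready=False condition, and the condition payload printed by setters.go:603 is ordinary JSON. The root cause named in the message is the empty /etc/kubernetes/cni/net.d/ directory; on an OVN-Kubernetes cluster that config is written once ovnkube is healthy, and the ovnkube-controller container is shown crash-looping later in this log. A minimal sketch of how such a condition serializes (the struct below is a simplified stand-in for the Kubernetes NodeCondition type, not kubelet source):

// Simplified stand-in for the Kubernetes NodeCondition type; shows how the
// condition={...} payload logged by setters.go:603 above is produced.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	cond := NodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false",
	}
	b, _ := json.Marshal(cond)
	fmt.Println(string(b)) // same shape as the condition={...} payload in the log
}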
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.646379 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.710656 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 14:09:40.393502408 +0000 UTC
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.737696 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.737770 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.737794 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.737728 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:06 crc kubenswrapper[5021]: E0121 15:25:06.738021 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:06 crc kubenswrapper[5021]: E0121 15:25:06.738109 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:06 crc kubenswrapper[5021]: E0121 15:25:06.738187 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
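The four "No sandbox for pod can be found" lines and their matching pod_workers errors show the other half of the same condition: pods that need the cluster pod network cannot get a sandbox while NetworkReady=false, so their sync is skipped outright rather than attempted. A rough sketch of that gate (assumed logic, not kubelet source; host-network pods such as ovnkube-node are exempt, which is why its containers appear running further below):

// Illustrative gate, assumed logic: host-network pods proceed regardless of
// CNI state; pod-network pods are skipped until the runtime reports ready.
package main

import (
	"errors"
	"fmt"
)

type Pod struct {
	Name        string
	HostNetwork bool
}

func canStartSandbox(p Pod, networkReady bool) error {
	if p.HostNetwork || networkReady {
		return nil
	}
	return errors.New("network is not ready: container runtime network not ready: NetworkReady=false")
}

func main() {
	for _, p := range []Pod{
		{"openshift-multus/network-metrics-daemon-xtd2p", false},
		{"openshift-ovn-kubernetes/ovnkube-node-9flhm", true},
	} {
		if err := canStartSandbox(p, false); err != nil {
			fmt.Printf("skipping %s: %v\n", p.Name, err) // mirrors pod_workers.go:1301
		} else {
			fmt.Printf("starting sandbox for %s\n", p.Name)
		}
	}
}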
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.748760 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.749457 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.749752 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.749805 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.749829 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.853179 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.853227 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.853243 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.853259 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:06 crc kubenswrapper[5021]: I0121 15:25:06.853271 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:06Z","lastTransitionTime":"2026-01-21T15:25:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.383877 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.383949 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.383980 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.384006 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.384023 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.486942 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.486981 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.486991 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.487008 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.487020 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.590753 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.590827 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.590841 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.590863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.590881 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.694365 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.694435 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.694449 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.694472 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.694487 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.710872 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 15:40:19.943790525 +0000 UTC Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.797622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.797737 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.797767 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.797804 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.797830 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.901112 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.901193 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.901208 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.901234 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:07 crc kubenswrapper[5021]: I0121 15:25:07.901261 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:07Z","lastTransitionTime":"2026-01-21T15:25:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.003614 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.003675 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.003691 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.003714 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.003731 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.107061 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.107167 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.107191 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.107241 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.107265 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.210337 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.210416 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.210430 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.210454 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.210473 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.314212 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.314287 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.314299 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.314321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.314339 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.416838 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.416968 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.417215 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.417248 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.417271 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.520871 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.521010 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.521029 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.521059 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.521090 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.593249 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.593527 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.594296 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:25:40.594257443 +0000 UTC m=+82.129371342 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.594301 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
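The UnmountVolume.TearDown failure above is a restart artifact rather than a storage fault: the kubelet's in-memory list of registered CSI drivers starts empty on boot and is repopulated only as each plugin re-registers over its registration socket, so any teardown of a kubevirt.io.hostpath-provisioner volume fails with the quoted error until that driver comes back. A simplified sketch of the registry lookup that produces it:

// Simplified sketch of the failure mode (the real code lives in the kubelet's
// CSI volume plugin): an empty post-restart driver registry makes TearDown
// fail until the driver re-registers.
package main

import (
	"fmt"
	"sync"
)

type csiRegistry struct {
	mu      sync.RWMutex
	drivers map[string]struct{}
}

func (r *csiRegistry) TearDown(driver, volume string) error {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if _, ok := r.drivers[driver]; !ok {
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
	}
	return nil // real code would call the driver's NodeUnpublishVolume here
}

func main() {
	reg := &csiRegistry{drivers: map[string]struct{}{}} // nothing re-registered yet
	err := reg.TearDown("kubevirt.io.hostpath-provisioner", "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8")
	fmt.Println(err) // retried with backoff until the driver registers again
}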
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.594391 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:40.594376337 +0000 UTC m=+82.129490246 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.624550 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.624608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.624618 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.624637 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.624656 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.694674 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.694748 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.694767 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.694901 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
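Each failed volume operation above is rescheduled 32 seconds out ("No retries permitted until ... durationBeforeRetry 32s"), the signature of per-volume exponential backoff. A sketch of the arithmetic, assuming the usual policy of a 500 ms initial delay doubling per failure up to a cap of just over two minutes (the exact constants live in the kubelet's nested pending operations code):

// Backoff sketch under assumed constants: 500ms initial delay, doubling per
// consecutive failure, capped at 2m2s. Seven straight failures yield 32s.
package main

import (
	"fmt"
	"time"
)

const maxDelay = 2*time.Minute + 2*time.Second

func durationBeforeRetry(failures int) time.Duration {
	d := 500 * time.Millisecond
	for i := 1; i < failures; i++ {
		d *= 2
		if d > maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 1; n <= 8; n++ {
		fmt.Printf("failure %d -> wait %v\n", n, durationBeforeRetry(n))
	}
	// failure 7 -> wait 32s, matching the log; the next retry time is
	// stamped into the error ("No retries permitted until 15:25:40").
}

Seven failures also add up to roughly a minute of accumulated waiting, which is consistent with the kubelet being about 82 seconds into its run (the m=+82 monotonic offset in the timestamps).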
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695007 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:40.694985687 +0000 UTC m=+82.230099576 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695373 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695443 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695476 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695482 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695530 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695552 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695624 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:40.695578993 +0000 UTC m=+82.230693032 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
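The projected.go errors explain why the kube-api-access-* mounts fail as a unit: a projected service-account volume is assembled from several sources (the token plus the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps), every source must resolve before anything is written, and "not registered" indicates the kubelet's watch-based object manager has not yet re-registered those references after the restart. An illustrative sketch of the all-or-nothing aggregation:

// Illustrative only: one unresolved source fails the whole projected volume,
// and all unresolved sources are reported together, as in projected.go:194.
package main

import (
	"errors"
	"fmt"
)

func prepareProjected(sources []string, registry map[string][]byte) error {
	var errs []error
	for _, s := range sources {
		if _, ok := registry[s]; !ok {
			errs = append(errs, fmt.Errorf("object %q not registered", s))
		}
	}
	return errors.Join(errs...) // every source must resolve before the volume mounts
}

func main() {
	err := prepareProjected(
		[]string{"kube-root-ca.crt", "openshift-service-ca.crt"},
		map[string][]byte{}, // object manager has registered nothing yet
	)
	fmt.Println(err)
}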
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.695663 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:40.695646435 +0000 UTC m=+82.230760354 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.712011 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 17:43:42.620313571 +0000 UTC
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.727545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.727618 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.727637 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.727670 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.727702 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.736929 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.736967 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.737065 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.737151 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.737226 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
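The certificate_manager.go:356 line prints a different rotation deadline on each pass (2025-12-25, 2026-01-04, 2025-11-07, 2025-12-16 across this section) because the deadline is recomputed with fresh jitter every time; all of them fall before the node's current clock of 2026-01-21, so rotation of the kubelet serving certificate is perpetually due. A sketch assuming client-go's behavior of picking a uniformly random point at 70-90% of the certificate lifetime; with an assumed one-year lifetime ending 2026-02-24, that window runs roughly 2025-11-03 through 2026-01-18, which brackets every deadline seen here:

// Jittered rotation deadline sketch. The 70-90% window is client-go's
// documented jitter; the one-year notBefore below is an assumption.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumed 1y lifetime
	for i := 0; i < 3; i++ {
		// A fresh deadline per pass, mirroring the changing dates in the log.
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).Format(time.RFC3339))
	}
}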
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.737525 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:08 crc kubenswrapper[5021]: E0121 15:25:08.737666 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.753406 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.753406 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.773353 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58
c429ae80ae0772790243d107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.786213 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.801739 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.821720 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.830374 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.830439 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.830450 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.830473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.830486 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.838775 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.854034 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.865980 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.880798 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.894087 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.910999 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.925426 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.933053 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.933090 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.933099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.933113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.933122 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:08Z","lastTransitionTime":"2026-01-21T15:25:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.939013 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" 
certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.955309 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.973083 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.984346 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:08 crc kubenswrapper[5021]: I0121 15:25:08.997204 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:08Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.035230 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.035275 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.035290 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.035309 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.035320 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.137781 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.137819 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.137860 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.137878 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.137888 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.240785 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.240829 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.240838 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.240852 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.240861 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.343879 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.343967 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.343981 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.344003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.344016 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.447420 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.447485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.447503 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.447526 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.447544 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.550576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.550629 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.550639 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.550655 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.550667 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.653814 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.653885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.653929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.653958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.653975 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.712784 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 12:01:13.129132038 +0000 UTC Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.756328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.756380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.756397 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.756419 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.756434 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.859798 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.859880 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.859895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.859948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.859962 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.962262 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.962321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.962335 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.962354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:09 crc kubenswrapper[5021]: I0121 15:25:09.962367 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:09Z","lastTransitionTime":"2026-01-21T15:25:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.065554 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.065617 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.065630 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.065657 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.065680 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.169267 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.169312 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.169323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.169337 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.169346 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.195425 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.195472 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.195482 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.195497 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.195507 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.214115 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:10Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.219333 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.219380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.219389 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.219408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.219418 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.234359 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:10Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.238556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.238710 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.238797 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.238873 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.238968 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.252581 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:10Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.257789 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.257847 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.257858 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.257879 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.257892 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.271650 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:10Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.276136 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.276180 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.276191 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.276211 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.276223 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.291148 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:10Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.291287 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.293631 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.293692 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.293707 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.293732 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.293745 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.396595 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.396645 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.396656 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.396673 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.396688 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.499889 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.499949 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.499961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.499977 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.499988 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.602698 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.602760 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.602771 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.602788 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.602805 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.705424 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.705481 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.705493 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.705514 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.705531 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.713666 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 20:14:34.540154534 +0000 UTC Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.737731 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.737861 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.737870 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.738020 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.737989 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.738144 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.738314 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:10 crc kubenswrapper[5021]: E0121 15:25:10.738420 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.808009 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.808066 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.808079 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.808099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.808112 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.911387 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.911451 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.911472 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.911499 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:10 crc kubenswrapper[5021]: I0121 15:25:10.911519 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:10Z","lastTransitionTime":"2026-01-21T15:25:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.014529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.015228 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.015345 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.015458 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.015574 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.118897 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.118962 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.118975 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.118998 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.119011 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.222026 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.222089 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.222108 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.222134 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.222152 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.324338 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.324385 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.324398 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.324415 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.324428 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.426932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.426985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.426999 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.427021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.427047 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.529598 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.529690 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.529710 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.529740 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.529756 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.633611 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.633684 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.633697 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.633723 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.633740 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.714034 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 04:47:19.597856853 +0000 UTC Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.736855 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.736932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.736951 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.736972 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.736987 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.737675 5021 scope.go:117] "RemoveContainer" containerID="9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.839857 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.839899 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.839927 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.839944 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.839955 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.943998 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.944633 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.944653 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.944683 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:11 crc kubenswrapper[5021]: I0121 15:25:11.944700 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:11Z","lastTransitionTime":"2026-01-21T15:25:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.047310 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.047353 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.047365 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.047382 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.047393 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.150440 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.150480 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.150492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.150508 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.150520 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.255965 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.256006 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.256017 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.256036 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.256047 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.358860 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.358938 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.358958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.358979 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.358996 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.403558 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/1.log" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.407844 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.408558 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.427287 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.457528 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6e
b0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.460951 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.460982 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.460996 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.461018 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.461032 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.472823 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.493255 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.510376 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.534364 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.550607 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.562992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.563043 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.563057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.563075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.563089 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.564098 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.578252 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.590630 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.605236 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.620343 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.639654 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.653557 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.665540 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.665589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.665601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.665654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.665668 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.668114 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.678594 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.693189 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:12Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.714436 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 08:47:29.98835621 +0000 UTC Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.737271 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.737311 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.737324 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.737425 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.737546 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.737560 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.737778 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.737856 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.740274 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.740415 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:25:12 crc kubenswrapper[5021]: E0121 15:25:12.740466 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:25:28.740450827 +0000 UTC m=+70.275564716 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.768692 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.768745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.768758 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.768776 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.768788 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.870863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.870948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.870976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.870994 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.871006 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.973737 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.973794 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.973804 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.973821 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:12 crc kubenswrapper[5021]: I0121 15:25:12.973832 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:12Z","lastTransitionTime":"2026-01-21T15:25:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.076159 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.076196 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.076214 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.076229 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.076239 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.179099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.179158 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.179177 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.179198 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.179212 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.281856 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.281896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.281917 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.281931 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.281942 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.384307 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.384378 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.384390 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.384406 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.384416 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.415297 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/2.log" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.416445 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/1.log" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.419732 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6" exitCode=1 Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.419771 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.419805 5021 scope.go:117] "RemoveContainer" containerID="9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.421965 5021 scope.go:117] "RemoveContainer" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6" Jan 21 15:25:13 crc kubenswrapper[5021]: E0121 15:25:13.422575 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.445027 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.458693 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.472098 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.486825 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.486928 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.486964 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.486978 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.487001 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.487017 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.500401 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.521046 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.537087 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.550326 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.569128 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.588088 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.589400 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.589562 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.589838 5021 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.590200 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.590487 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.602124 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.618688 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.634453 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.656824 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.680010 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9ea556125c81d85d41ebe6a38fcace0b317ecb58c429ae80ae0772790243d107\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:24:54Z\\\",\\\"message\\\":\\\"\\\\nI0121 15:24:53.899464 6479 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0121 15:24:53.899488 6479 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0121 15:24:53.899532 6479 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0121 15:24:53.899495 6479 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0121 15:24:53.899556 6479 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0121 15:24:53.899588 6479 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0121 15:24:53.899608 6479 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0121 15:24:53.899643 6479 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0121 15:24:53.899726 6479 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0121 15:24:53.899733 6479 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0121 15:24:53.899739 6479 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0121 15:24:53.899774 6479 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0121 15:24:53.899780 6479 handler.go:208] Removed *v1.Node event handler 7\\\\nI0121 15:24:53.899819 6479 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0121 15:24:53.899810 6479 factory.go:656] Stopping watch factory\\\\nI0121 15:24:53.899846 6479 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node 
crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.692293 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.694897 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.695032 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.695097 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.695168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.695281 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.705323 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:13Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.715440 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 01:57:33.779890457 +0000 UTC Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.799880 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.799995 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.800019 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.800054 5021 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.800078 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.903303 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.903381 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.903396 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.903424 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:13 crc kubenswrapper[5021]: I0121 15:25:13.903440 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:13Z","lastTransitionTime":"2026-01-21T15:25:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.005856 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.005896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.005942 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.005960 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.005972 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.108414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.108477 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.108489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.108507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.108519 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.211821 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.211881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.211892 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.211934 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.211952 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.314313 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.314372 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.314389 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.314414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.314431 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.417276 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.417323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.417334 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.417351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.417361 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.426163 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/2.log" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.430748 5021 scope.go:117] "RemoveContainer" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6" Jan 21 15:25:14 crc kubenswrapper[5021]: E0121 15:25:14.430974 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.444739 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.459462 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.479757 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.491778 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.502927 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.512158 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.519415 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.519502 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.519513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.519530 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.519541 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.522511 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.539089 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6e
b0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.548271 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.561802 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.577096 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.588703 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/r
ootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.604163 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.622278 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.622317 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.622327 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.622344 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.622356 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no 
CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.624358 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"
/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.667610 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.679686 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.689540 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:14Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.715807 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 10:57:03.074613933 +0000 UTC Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.724655 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.724684 5021 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.724695 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.724712 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.724722 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.737152 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.737222 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.737268 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:14 crc kubenswrapper[5021]: E0121 15:25:14.737370 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.737384 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:14 crc kubenswrapper[5021]: E0121 15:25:14.737466 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:14 crc kubenswrapper[5021]: E0121 15:25:14.737872 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:14 crc kubenswrapper[5021]: E0121 15:25:14.738024 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.827183 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.827220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.827229 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.827246 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.827256 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.929787 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.929828 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.929838 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.929853 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:14 crc kubenswrapper[5021]: I0121 15:25:14.929865 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:14Z","lastTransitionTime":"2026-01-21T15:25:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.032739 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.032808 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.032825 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.032847 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.032863 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.135233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.135267 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.135277 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.135291 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.135301 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.237465 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.237682 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.237717 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.237738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.237752 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.340354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.340399 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.340412 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.340428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.340440 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.443801 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.443865 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.443890 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.443955 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.443977 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.546489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.546546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.546556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.546570 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.546579 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.650506 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.650556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.650565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.650579 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.650588 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.716956 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 03:11:42.893820589 +0000 UTC Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.753399 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.753451 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.753464 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.753484 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.753496 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.855842 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.855882 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.855891 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.855932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.855942 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.958538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.958621 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.958639 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.958664 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:15 crc kubenswrapper[5021]: I0121 15:25:15.958682 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:15Z","lastTransitionTime":"2026-01-21T15:25:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.061615 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.061694 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.061706 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.061727 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.061740 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.164442 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.164533 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.164556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.164592 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.164628 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.267642 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.267685 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.267696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.267711 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.267724 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.371013 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.371078 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.371093 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.371112 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.371127 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.475099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.475178 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.475196 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.475224 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.475245 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.578488 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.578551 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.578564 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.578583 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.578596 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.681318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.681390 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.681404 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.681429 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.681445 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.718080 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:15:50.678523366 +0000 UTC Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.737600 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.737787 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.737993 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.738100 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:16 crc kubenswrapper[5021]: E0121 15:25:16.738240 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:16 crc kubenswrapper[5021]: E0121 15:25:16.738112 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:16 crc kubenswrapper[5021]: E0121 15:25:16.737829 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:16 crc kubenswrapper[5021]: E0121 15:25:16.738397 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.784213 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.784293 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.784349 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.784384 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.784404 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.888409 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.888479 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.888490 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.888509 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.888521 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.991522 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.991569 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.991583 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.991604 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:16 crc kubenswrapper[5021]: I0121 15:25:16.991619 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:16Z","lastTransitionTime":"2026-01-21T15:25:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.094467 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.094545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.094568 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.094603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.094629 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.198818 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.198885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.198899 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.198946 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.198962 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.302182 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.302239 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.302248 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.302274 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.302287 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.409075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.409127 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.409148 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.409176 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.409193 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.511566 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.511616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.511631 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.511649 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.511663 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.614520 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.614579 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.614596 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.614620 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.614637 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.717757 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.717803 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.717816 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.717837 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.717850 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.718980 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 21:07:48.017884945 +0000 UTC Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.821188 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.821240 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.821253 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.821674 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.821720 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.925168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.925241 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.925255 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.925301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:17 crc kubenswrapper[5021]: I0121 15:25:17.925316 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:17Z","lastTransitionTime":"2026-01-21T15:25:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.028648 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.028701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.028714 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.028738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.028750 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.131237 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.131308 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.131323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.131338 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.131348 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.234373 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.234428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.234441 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.234467 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.234482 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.337706 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.338431 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.338460 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.338498 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.338524 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.442696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.442769 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.442788 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.442815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.442839 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.546085 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.546153 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.546176 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.546206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.546224 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.649880 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.649958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.649972 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.649992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.650007 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.719858 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 06:22:34.034157347 +0000 UTC Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.736954 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.737057 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.737058 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:18 crc kubenswrapper[5021]: E0121 15:25:18.737183 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:18 crc kubenswrapper[5021]: E0121 15:25:18.737373 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.737402 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:18 crc kubenswrapper[5021]: E0121 15:25:18.737510 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:18 crc kubenswrapper[5021]: E0121 15:25:18.737618 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.759783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.759856 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.759875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.759902 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.759945 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.761799 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.778481 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.790100 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.799141 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.817683 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.829921 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.842046 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.859458 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6e
b0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.862017 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.862038 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.862049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.862063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.862072 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.869469 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.880470 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.892435 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.902349 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.915284 5021 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.931004 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.945579 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.960668 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.965408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.965521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.965531 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.965554 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.965567 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:18Z","lastTransitionTime":"2026-01-21T15:25:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:18 crc kubenswrapper[5021]: I0121 15:25:18.975329 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:18Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.068319 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.068407 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.068429 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.068463 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.068484 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.171376 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.171441 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.171453 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.171475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.171489 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.280667 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.280729 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.280741 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.280760 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.280777 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.384496 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.384550 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.384560 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.384577 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.384590 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.487323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.487363 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.487377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.487393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.487404 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.589997 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.590041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.590056 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.590077 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.590092 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.693075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.693138 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.693152 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.693179 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.693195 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.720710 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:38:36.470629021 +0000 UTC
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.796640 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.796696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.796709 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.796727 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.796740 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.898982 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.899054 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.899075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.899108 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:19 crc kubenswrapper[5021]: I0121 15:25:19.899134 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:19Z","lastTransitionTime":"2026-01-21T15:25:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.001617 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.001670 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.001681 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.001701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.001715 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.104926 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.104971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.104987 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.105009 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.105025 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.207520 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.207572 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.207583 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.207601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.207616 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.309966 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.310024 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.310037 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.310082 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.310102 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.412937 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.412986 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.412997 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.413019 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.413031 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.515949 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.515992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.516002 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.516018 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.516030 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.618327 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.618366 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.618377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.618395 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.618406 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.655272 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.655309 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.655316 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.655329 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.655338 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.666516 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:20Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.670144 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.670300 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.670381 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.670462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.670532 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.680983 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:20Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.684981 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.685028 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.685041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.685057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.685070 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.697595 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:20Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.701440 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.701564 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.701601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.701623 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.701643 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.714975 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:20Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.718778 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.718817 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.718828 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.718847 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.718858 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.721299 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 04:41:58.636958849 +0000 UTC Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.730918 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:20Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.731031 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.732462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.732485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.732493 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.732507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.732515 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.739105 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.739216 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.739449 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.739513 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.740120 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.740328 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.740427 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:20 crc kubenswrapper[5021]: E0121 15:25:20.740506 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.834093 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.834121 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.834129 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.834141 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.834150 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.936169 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.936199 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.936207 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.936221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:20 crc kubenswrapper[5021]: I0121 15:25:20.936250 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:20Z","lastTransitionTime":"2026-01-21T15:25:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.038301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.038352 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.038368 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.038389 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.038405 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.141035 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.141076 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.141086 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.141101 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.141112 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.243779 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.243818 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.243827 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.243842 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.243852 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.346532 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.346561 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.346569 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.346581 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.346590 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.450017 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.450067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.450080 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.450099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.450114 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.552554 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.552596 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.552606 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.552622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.552633 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.655167 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.655207 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.655219 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.655236 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.655247 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.722266 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 10:51:55.237152115 +0000 UTC Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.756994 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.757030 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.757041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.757056 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.757068 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.859538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.859600 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.859622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.859646 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.859664 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.962466 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.962524 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.962535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.962557 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:21 crc kubenswrapper[5021]: I0121 15:25:21.962568 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:21Z","lastTransitionTime":"2026-01-21T15:25:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.064840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.064878 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.064990 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.065012 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.065021 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.167235 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.167277 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.167286 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.167302 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.167313 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.269828 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.269868 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.269879 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.269896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.269935 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.372321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.372360 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.372368 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.372386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.372396 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.475256 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.475296 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.475306 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.475319 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.475328 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.577574 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.577616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.577629 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.577651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.577665 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.680653 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.680700 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.680710 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.681030 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.681052 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
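The block above is the kubelet stamping the node's Ready condition to False on every status-update tick. The same condition can be read back from the API server; a minimal client-go sketch (kubeconfig location and formatting are assumptions, not taken from this log):

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Kubeconfig path is an assumption; any config allowed to read nodes works.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	// "crc" is the node name that appears throughout this log.
    	node, err := cs.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, c := range node.Status.Conditions {
    		// The Ready/False condition printed here is the one setters.go:603 writes above.
    		fmt.Printf("%-22s %-6s %s: %s\n", c.Type, c.Status, c.Reason, c.Message)
    	}
    }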
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.722361 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 22:28:22.634990872 +0000 UTC
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.737886 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.737981 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.738064 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:22 crc kubenswrapper[5021]: E0121 15:25:22.738073 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:22 crc kubenswrapper[5021]: E0121 15:25:22.738149 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.738240 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:22 crc kubenswrapper[5021]: E0121 15:25:22.738247 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:22 crc kubenswrapper[5021]: E0121 15:25:22.738394 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.783426 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.783465 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.783474 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.783489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.783499 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.886965 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.887021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.887035 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.887058 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.887072 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.989496 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.989543 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.989555 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.989573 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:22 crc kubenswrapper[5021]: I0121 15:25:22.989585 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:22Z","lastTransitionTime":"2026-01-21T15:25:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.092752 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.093220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.093318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.093402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.093476 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.196306 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.196348 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.196360 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.196377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.196388 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.298733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.298800 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.298812 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.298830 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.298860 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.401874 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.401950 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.401963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.401985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.401998 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.504871 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.504901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.504932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.504948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.504958 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.608992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.609057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.609070 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.609091 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.609103 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.711690 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.711735 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.711746 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.711760 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.711771 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.723141 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 11:11:34.686797086 +0000 UTC
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.814899 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.814977 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.814991 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.815009 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.815441 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.919113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.919157 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.919168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.919188 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:23 crc kubenswrapper[5021]: I0121 15:25:23.919203 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:23Z","lastTransitionTime":"2026-01-21T15:25:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
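The certificate_manager.go lines report the same expiration (2026-02-24 05:53:03 UTC) but a different rotation deadline on every pass, because the deadline is re-drawn with jitter each time the manager re-evaluates; several of the deadlines here are already in the past, so rotation keeps being retried. A sketch of that computation, assuming the commonly documented client-go heuristic of a uniform draw in roughly the 70-90% band of the certificate's lifetime (the exact fractions are an assumption):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline picks a point 70%..90% of the way through the
    // certificate's lifetime; re-evaluating yields a different deadline each
    // time, which is why consecutive log lines above disagree with each other.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	// Expiration copied from the log; the issue time is an assumption.
    	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
    	notBefore := notAfter.Add(-365 * 24 * time.Hour)
    	for i := 0; i < 3; i++ {
    		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
    	}
    }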
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.022292 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.022342 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.022350 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.022367 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.022377 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.125433 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.125473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.125486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.125505 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.125517 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.228734 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.228785 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.228797 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.228817 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.228831 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.332307 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.332375 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.332388 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.332408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.332425 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.434933 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.434979 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.434989 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.435006 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.435017 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.538033 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.538261 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.538344 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.538428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.538694 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.641001 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.641071 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.641088 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.641105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.641114 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.723291 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 16:03:15.499776069 +0000 UTC
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.736852 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.736881 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.736875 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:24 crc kubenswrapper[5021]: E0121 15:25:24.737018 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.737038 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:24 crc kubenswrapper[5021]: E0121 15:25:24.737144 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:24 crc kubenswrapper[5021]: E0121 15:25:24.737277 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:24 crc kubenswrapper[5021]: E0121 15:25:24.737343 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.743167 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.743214 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.743224 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.743238 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.743251 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.846427 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.846473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.846485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.846504 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.846517 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.949298 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.949338 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.949350 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.949365 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:24 crc kubenswrapper[5021]: I0121 15:25:24.949377 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:24Z","lastTransitionTime":"2026-01-21T15:25:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.052175 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.052220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.052231 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.052245 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.052256 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.154876 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.155401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.155505 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.155622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.155730 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.258725 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.258773 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.258783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.258800 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.258812 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.360953 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.360993 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.361004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.361018 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.361028 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.464948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.465000 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.465013 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.465034 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.465047 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.568688 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.568757 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.568766 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.568781 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.568791 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.671061 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.671105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.671115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.671129 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.671140 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.723733 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 07:42:58.866053857 +0000 UTC
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.738229 5021 scope.go:117] "RemoveContainer" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6"
Jan 21 15:25:25 crc kubenswrapper[5021]: E0121 15:25:25.738425 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.774095 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.774152 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.774168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.774184 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.774199 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.877622 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.877679 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.877691 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.877712 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.877725 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
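The ovnkube-controller line above shows the container restart back-off: the kubelet delays each restart of a crashing container exponentially, by the commonly documented defaults starting at 10s and capped at 5m, so "back-off 20s" corresponds to the second consecutive failure. A sketch of that schedule (the initial delay and cap are assumptions taken from the documented defaults, not from this log):

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff doubles the restart delay per consecutive crash, capped at 5m.
    func backoff(restarts int) time.Duration {
    	d := 10 * time.Second // assumed initial delay
    	for i := 0; i < restarts; i++ {
    		d *= 2
    		if d > 5*time.Minute {
    			return 5 * time.Minute // assumed cap
    		}
    	}
    	return d
    }

    func main() {
    	for r := 0; r <= 5; r++ {
    		fmt.Printf("crash #%d -> back-off %s\n", r+1, backoff(r))
    	}
    	// crash #2 -> back-off 20s, matching the ovnkube-controller entry above
    }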
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.980219 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.980290 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.980303 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.980328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:25 crc kubenswrapper[5021]: I0121 15:25:25.980344 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:25Z","lastTransitionTime":"2026-01-21T15:25:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.083237 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.083297 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.083305 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.083322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.083334 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.185278 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.185312 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.185323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.185340 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.185354 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.287883 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.287979 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.287994 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.288016 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.288029 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.390670 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.390730 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.390746 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.390766 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.390776 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.494331 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.494400 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.494415 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.494434 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.494452 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.597307 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.597351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.597361 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.597377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.597389 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.700081 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.700126 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.700137 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.700155 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.700167 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.724546 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 02:50:14.841919835 +0000 UTC
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.736838 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:26 crc kubenswrapper[5021]: E0121 15:25:26.736981 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.736839 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.737041 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.737042 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:26 crc kubenswrapper[5021]: E0121 15:25:26.737303 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:26 crc kubenswrapper[5021]: E0121 15:25:26.737355 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:26 crc kubenswrapper[5021]: E0121 15:25:26.737427 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.802361 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.802394 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.802405 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.802419 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.802430 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.905580 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.905936 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.906026 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.906105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:26 crc kubenswrapper[5021]: I0121 15:25:26.906184 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:26Z","lastTransitionTime":"2026-01-21T15:25:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.008818 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.008855 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.008864 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.008881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.008891 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.111333 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.111381 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.111394 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.111411 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.111423 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.213419 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.213466 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.213478 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.213495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.213508 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.316024 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.316092 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.316105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.316121 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.316131 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.419293 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.419348 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.419359 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.419377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.419389 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.522315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.522371 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.522382 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.522399 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.522411 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.626231 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.626285 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.626296 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.626323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.626338 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.725433 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 21:41:50.106888741 +0000 UTC
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.728901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.729281 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.729375 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.729492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.729595 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.832079 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.832131 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.832143 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.832164 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.832178 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.934230 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.934263 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.934272 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.934283 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:27 crc kubenswrapper[5021]: I0121 15:25:27.934292 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:27Z","lastTransitionTime":"2026-01-21T15:25:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.036406 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.036492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.036505 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.036522 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.036561 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.138877 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.138975 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.138988 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.139010 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.139023 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.242475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.242521 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.242538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.242581 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.242596 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.345281 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.345323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.345333 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.345348 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.345359 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.447600 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.447651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.447667 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.447682 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.447692 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.551144 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.551804 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.551830 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.552284 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.552304 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.655593 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.655641 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.655654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.655673 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.655686 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.726773 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 10:33:40.161265579 +0000 UTC
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.737362 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.737510 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.737528 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.737687 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.737713 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.737807 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.737386 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.737970 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.756648 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.758535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.758571 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.758590 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.758608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.758634 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.774234 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.789236 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.803712 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.813162 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.813505 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 15:25:28 crc kubenswrapper[5021]: E0121 15:25:28.813829 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:26:00.813661549 +0000 UTC m=+102.348775628 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.822926 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.839561 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.853196 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.861725 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.861967 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.862056 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.862179 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.862267 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.874467 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.891624 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.909314 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca
37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.926699 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.942095 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.959712 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.964746 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.964792 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.964803 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.964815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.964825 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:28Z","lastTransitionTime":"2026-01-21T15:25:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:28 crc kubenswrapper[5021]: I0121 15:25:28.984773 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.000090 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:28Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.019053 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.038261 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.069753 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.069796 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.069806 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.069823 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.069835 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.177302 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.177371 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.177392 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.177409 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.177421 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.280980 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.281053 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.281067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.281087 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.281102 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.384594 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.384647 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.384660 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.384675 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.384685 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.483466 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/0.log" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.483541 5021 generic.go:334] "Generic (PLEG): container finished" podID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" containerID="ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd" exitCode=1 Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.483568 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerDied","Data":"ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.483970 5021 scope.go:117] "RemoveContainer" containerID="ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.486750 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.486782 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.486793 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.486807 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.486818 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.510256 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.527368 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.542536 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.561935 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.580089 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.589859 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.589930 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.589945 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.589973 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 
15:25:29.589990 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.600608 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.616486 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.633353 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.648299 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.661891 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.675329 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.690557 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.692454 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.692547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.692580 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.692604 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.692616 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.714769 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.727216 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 10:19:44.920521204 +0000 UTC Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.736559 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.750753 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.769227 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.784701 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:29Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.796004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.796037 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.796047 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.796061 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.796074 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.899044 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.899089 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.899101 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.899117 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:29 crc kubenswrapper[5021]: I0121 15:25:29.899127 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:29Z","lastTransitionTime":"2026-01-21T15:25:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.002315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.002364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.002380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.002397 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.002408 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.105890 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.106016 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.106027 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.106048 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.106063 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.210506 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.210546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.210556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.210571 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.210581 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.313440 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.313488 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.313501 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.313519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.313534 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.420294 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.420370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.420403 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.420421 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.420436 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.490819 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/0.log" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.490876 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerStarted","Data":"9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.509546 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.523492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.523551 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.523566 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.523592 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.523606 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.525927 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.550950 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.566840 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.581855 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.595770 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.612969 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.626414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.626454 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.626464 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.626480 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.626491 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.636395 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate 
ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.650462 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.664411 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.681127 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.699016 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.714070 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.727405 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 05:29:42.28843035 +0000 UTC
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.729901 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.729976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.729985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.730001 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.730013 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.730857 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.737454 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.737472 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.737458 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:30 crc kubenswrapper[5021]: E0121 15:25:30.737582 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.737711 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:30 crc kubenswrapper[5021]: E0121 15:25:30.737814 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:30 crc kubenswrapper[5021]: E0121 15:25:30.737955 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:30 crc kubenswrapper[5021]: E0121 15:25:30.738124 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.744497 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.758271 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.774131 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:30Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.833168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.833220 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.833229 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.833247 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.833260 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.937265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.937321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.937336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.937358 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:30 crc kubenswrapper[5021]: I0121 15:25:30.937374 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:30Z","lastTransitionTime":"2026-01-21T15:25:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.040890 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.040961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.040976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.040994 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.041009 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.125450 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.125528 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.125538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.125552 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.125562 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.140197 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:31Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.144686 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.144733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.144745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.144762 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.144774 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.157885 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:31Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.162445 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.162489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.162504 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.162532 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.162548 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.176221 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:31Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.180696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.180745 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.180757 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.180776 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.180788 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.196023 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:31Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.200840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.200885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.200898 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.200942 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.200956 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.219678 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:31Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:31 crc kubenswrapper[5021]: E0121 15:25:31.219867 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.223198 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
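event="NodeHasSufficientMemory"

The "Unable to update node status" entry above marks the exhaustion of a bounded retry loop: the kubelet attempts the node-status update a fixed number of times per sync (the upstream constant nodeStatusUpdateRetry, 5 in the kubelet releases I am familiar with) before giving up until the next sync interval. A minimal sketch of that control flow, with the failing API call replaced by a hypothetical stub that always errors, as every attempt does in this log:

    // retry.go: sketch of the kubelet's bounded node-status retry.
    // nodeStatusUpdateRetry mirrors the upstream kubelet constant (assumed 5);
    // tryUpdateNodeStatus is a hypothetical stand-in for the real PATCH call.
    package main

    import (
        "errors"
        "fmt"
    )

    const nodeStatusUpdateRetry = 5

    func tryUpdateNodeStatus() error {
        // In the log every attempt fails before reaching the API object,
        // because the admission webhook's TLS certificate has expired.
        return errors.New(`failed calling webhook "node.network-node-identity.openshift.io"`)
    }

    func main() {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            if err := tryUpdateNodeStatus(); err != nil {
                fmt.Println("Error updating node status, will retry:", err)
                continue
            }
            return
        }
        fmt.Println("Unable to update node status: update node status exceeds retry count")
    }
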
Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.223250 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.223265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.223286 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.223301 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.326052 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.326115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.326127 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.326147 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.326161 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.428414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.428469 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.428479 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.428495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.428508 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
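Has your network provider started?"}

Both giant patch attempts above fail for the same reason: the "node.network-node-identity.openshift.io" webhook at 127.0.0.1:9743 serves a TLS certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-21, so the API server rejects every status PATCH before it is applied. A small standalone probe can confirm the certificate's validity window; this is a diagnostic sketch, not kubelet code, and it sets InsecureSkipVerify deliberately so the expired certificate can still be inspected:

    // checkcert.go: print the validity window of the certificate served on
    // the webhook endpoint named in the log (address taken from the log line).
    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Println("subject:  ", cert.Subject)
        fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
        fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
        if time.Now().After(cert.NotAfter) {
            fmt.Println("certificate has expired") // the x509 error seen in the log
        }
    }

Until that serving certificate is rotated, every retry below can be expected to fail identically.
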
Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.530596 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.530648 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.530658 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.530673 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.530683 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.633499 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.633565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.633575 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.633589 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.633598 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.728036 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 19:29:28.090653043 +0000 UTC Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.736051 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.736093 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.736104 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.736117 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.736126 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.838470 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.838519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.838530 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.838545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.838557 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
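Has your network provider started?"}

The certificate_manager.go:356 entries interleaved here report the same kubelet-serving expiry (2026-02-24 05:53:03 UTC) but a different rotation deadline on each pass (2025-11-14 above, then 2025-12-13 and others below). That is expected behavior rather than an error: client-go's certificate manager, as I understand its internals, re-draws a jittered deadline at a random point between roughly 70% and 90% of the certificate's validity window each time it evaluates rotation. A sketch of that computation; the one-year validity is an assumption, since the log does not show NotBefore:

    // jitter.go: approximate the rotation-deadline draw behind the
    // certificate_manager.go log lines (70%..90% of validity, jittered).
    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := float64(notAfter.Sub(notBefore))
        jittered := time.Duration(total*0.7 + rand.Float64()*total*0.2)
        return notBefore.Add(jittered)
    }

    func main() {
        notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z") // from the log
        notBefore := notAfter.AddDate(-1, 0, 0)                         // assumed one-year cert
        for i := 0; i < 3; i++ {
            fmt.Println("rotation deadline:", nextRotationDeadline(notBefore, notAfter).Format(time.RFC3339))
        }
    }

Because every drawn deadline already lies in the past relative to the node's (skewed) clock, rotation is permanently due and the manager re-evaluates on each sync, which is why the line keeps repeating with fresh deadlines.
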
Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.940669 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.940716 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.940732 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.940747 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:31 crc kubenswrapper[5021]: I0121 15:25:31.940760 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:31Z","lastTransitionTime":"2026-01-21T15:25:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.043234 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.043289 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.043303 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.043321 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.043336 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.146086 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.146755 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.146802 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.146825 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.146838 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.249294 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.249336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.249345 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.249359 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.249393 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.351937 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.351985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.351998 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.352016 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.352030 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.454774 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.454810 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.454819 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.454834 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.454843 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.557344 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.557390 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.557400 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.557417 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.557429 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.659436 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.659469 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.659477 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.659489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.659497 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.728933 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 10:31:16.209795956 +0000 UTC Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.737242 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.737309 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:32 crc kubenswrapper[5021]: E0121 15:25:32.737373 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
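pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"

Every "Error syncing pod, skipping" and NotReady condition in this section carries the same root message: no CNI configuration file in /etc/kubernetes/cni/net.d/. The container runtime keeps reporting NetworkReady=false until a network configuration appears in that directory; to the best of my understanding, libcni's loader considers *.conf, *.conflist and *.json files there. A minimal standalone check (the path comes from the log; the extension list is an assumption about libcni):

    // cnicheck.go: report whether the CNI conf directory named in the log
    // contains any loadable network configuration.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d"
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        var confs []string
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            fmt.Println("no CNI configuration file found; network plugin not ready")
            return
        }
        fmt.Println("CNI configs:", confs)
    }

On this cluster the directory presumably stays empty because the network provider (OVN-Kubernetes, the component behind the failing node.network-node-identity webhook) has not come up, which is exactly what the repeated "Has your network provider started?" question is pointing at.
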
Jan 21 15:25:32 crc kubenswrapper[5021]: E0121 15:25:32.737429 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.737458 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:32 crc kubenswrapper[5021]: E0121 15:25:32.737501 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.737542 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:32 crc kubenswrapper[5021]: E0121 15:25:32.737584 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.761983 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.762039 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.762049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.762067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.762079 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.864007 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.864046 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.864056 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.864070 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.864082 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.966999 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.967055 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.967070 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.967085 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:32 crc kubenswrapper[5021]: I0121 15:25:32.967099 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:32Z","lastTransitionTime":"2026-01-21T15:25:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.070280 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.070316 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.070324 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.070339 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.070349 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.172134 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.172169 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.172177 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.172191 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.172200 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.274089 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.274148 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.274196 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.274210 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.274219 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.377998 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.378034 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.378049 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.378063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.378072 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.480877 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.480936 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.480948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.480962 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.480974 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.585853 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.585903 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.585929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.585948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.585964 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.688512 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.688587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.688597 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.688610 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.688619 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.729382 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 12:42:31.877710274 +0000 UTC Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.790796 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.790844 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.790855 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.790871 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.790883 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.894023 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.894056 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.894064 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.894079 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.894093 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.996576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.996628 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.996645 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.996662 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:33 crc kubenswrapper[5021]: I0121 15:25:33.996675 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:33Z","lastTransitionTime":"2026-01-21T15:25:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.098863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.098934 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.098951 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.098971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.098985 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.201111 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.201156 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.201165 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.201181 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.201192 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.303534 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.303575 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.303590 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.303607 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.303619 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.405726 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.405779 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.405834 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.405921 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.405934 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.508678 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.508721 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.508732 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.508749 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.508761 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.611067 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.611105 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.611113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.611126 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.611134 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.713820 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.713882 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.713895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.713927 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.713939 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.730413 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 00:41:58.390150706 +0000 UTC Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.737870 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.738181 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.738184 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.738355 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:34 crc kubenswrapper[5021]: E0121 15:25:34.738444 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:34 crc kubenswrapper[5021]: E0121 15:25:34.738343 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:34 crc kubenswrapper[5021]: E0121 15:25:34.738528 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:34 crc kubenswrapper[5021]: E0121 15:25:34.738614 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.753418 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.816395 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.816468 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.816478 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.816495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.816507 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.918975 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.919016 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.919028 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.919043 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:34 crc kubenswrapper[5021]: I0121 15:25:34.919054 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:34Z","lastTransitionTime":"2026-01-21T15:25:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.021330 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.021364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.021374 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.021386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.021395 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.124315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.124359 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.124370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.124387 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.124399 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.226651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.226706 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.226719 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.226741 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.226754 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.328944 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.328995 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.329008 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.329026 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.329037 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.431583 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.431625 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.431634 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.431647 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.431658 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.534802 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.534842 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.534852 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.534864 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.534874 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.639902 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.639972 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.639988 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.640005 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.640017 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.731535 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 11:39:32.476175876 +0000 UTC Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.742473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.742511 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.742520 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.742533 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.742543 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.845020 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.845084 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.845097 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.845113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.845124 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.947364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.947402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.947412 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.947425 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:35 crc kubenswrapper[5021]: I0121 15:25:35.947434 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:35Z","lastTransitionTime":"2026-01-21T15:25:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.050302 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.050360 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.050380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.050401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.050412 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.152483 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.152519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.152529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.152543 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.152553 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.254393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.254433 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.254443 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.254458 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.254470 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.357197 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.357233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.357242 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.357254 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.357263 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.459218 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.459265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.459278 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.459297 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.459310 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.562160 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.562205 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.562215 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.562236 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.562246 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.664292 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.664343 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.664353 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.664367 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.664379 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.732520 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 23:02:05.668022629 +0000 UTC Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.736880 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.736979 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.736979 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.736981 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:36 crc kubenswrapper[5021]: E0121 15:25:36.737137 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:36 crc kubenswrapper[5021]: E0121 15:25:36.737249 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:36 crc kubenswrapper[5021]: E0121 15:25:36.737377 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:36 crc kubenswrapper[5021]: E0121 15:25:36.737416 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.766737 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.766788 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.766800 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.766821 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.766836 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.869435 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.869485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.869499 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.869517 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.869529 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.972401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.972525 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.972560 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.972593 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:36 crc kubenswrapper[5021]: I0121 15:25:36.972619 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:36Z","lastTransitionTime":"2026-01-21T15:25:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.077372 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.077441 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.077711 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.077750 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.077763 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.180320 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.180362 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.180374 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.180390 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.180401 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.283271 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.283316 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.283330 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.283351 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.283365 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.385161 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.385196 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.385223 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.385236 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.385246 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.488072 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.488117 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.488127 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.488141 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.488150 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.590584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.590649 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.590662 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.590694 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.590710 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.694354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.694417 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.694437 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.694459 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.694472 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.732963 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 02:21:19.004543599 +0000 UTC Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.797071 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.797140 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.797152 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.797173 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.797187 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.899897 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.899960 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.899970 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.899985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:37 crc kubenswrapper[5021]: I0121 15:25:37.899997 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:37Z","lastTransitionTime":"2026-01-21T15:25:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.002799 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.002865 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.002881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.002939 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.002955 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.107265 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.107314 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.107325 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.107344 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.107357 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.210866 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.210929 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.210940 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.210954 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.210967 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.313556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.313659 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.313685 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.313724 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.313752 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.417584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.417674 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.417693 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.417723 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.417747 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.520809 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.520865 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.520875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.520889 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.520899 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.623584 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.623619 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.623629 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.623644 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.623653 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.726094 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.726172 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.726188 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.726208 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.726221 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.733350 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 03:39:49.816036899 +0000 UTC Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.737847 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.737877 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.737900 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.738071 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:38 crc kubenswrapper[5021]: E0121 15:25:38.738035 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:38 crc kubenswrapper[5021]: E0121 15:25:38.738155 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:38 crc kubenswrapper[5021]: E0121 15:25:38.738227 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:38 crc kubenswrapper[5021]: E0121 15:25:38.738315 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.756836 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.773626 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.788211 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.805824 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828239 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828633 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828671 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828684 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828702 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.828714 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.845356 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-
01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.862123 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.883957 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6e
b0ca298d8db61ac45db9a1f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.898577 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.915365 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.932050 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.932115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.932133 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.932156 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.932172 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:38Z","lastTransitionTime":"2026-01-21T15:25:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.933933 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.950324 5021 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.975977 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f86c73a1-9c5e-4d01-856b-dce5ef6e0ae3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4972b65704958e549d665d88ee3470710096c4784f9872045274d8af63e2043d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e1f771251595f0de0a5ec1739ebe02a63de7d6a3cefdf90a42b48c115dab87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb786620c9d65d8acf9881ed0f98a94de9f39ca7eb6870377958061f4dc9af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://50ec1a0333d7c102879ec60e1262b8035b347cad7f9107959e49b4718b3a79db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://21c65f33d2b7b951ba4b6f87e7246aa6911ac9b7db7be966578213fc3041441f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:38 crc kubenswrapper[5021]: I0121 15:25:38.992536 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:38Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.008682 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.025839 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.035597 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.035650 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.035662 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.035686 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.035704 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.042151 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.057846 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:39Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.138775 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.138827 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.138840 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.138861 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.138875 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.242672 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.242767 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.242785 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.242813 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.242834 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.346059 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.346191 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.346515 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.347196 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.347290 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.450238 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.450282 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.450296 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.450318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.450332 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.553036 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.553091 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.553104 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.553123 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.553135 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.656481 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.656559 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.656576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.656601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.656618 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.733709 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 22:34:30.221880504 +0000 UTC
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.737826 5021 scope.go:117] "RemoveContainer" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.765475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.765887 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.765922 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.765963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.765980 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.869527 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.869696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.869715 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.869733 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.869820 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.972714 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.972785 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.972796 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.972817 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:39 crc kubenswrapper[5021]: I0121 15:25:39.972832 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:39Z","lastTransitionTime":"2026-01-21T15:25:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.076011 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.076069 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.076079 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.076098 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.076112 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.179068 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.179127 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.179138 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.179156 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.179169 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.297498 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.297547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.297556 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.297574 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.297585 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.400841 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.400902 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.400938 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.400961 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.400975 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.505115 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.505174 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.505193 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.505212 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.505222 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.527368 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/2.log" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.532391 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912"} Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.533128 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.561015 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7a8ac23da1f14fa76303b89fa5f285acf38ba38
69b2cb7ccc9551612ba86912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate 
ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.577302 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.594756 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.608136 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.608197 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.608213 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.608232 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.608248 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.614729 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.630474 5021 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.643677 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.643876 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.644014 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:44.643967981 +0000 UTC m=+146.179081870 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.644291 5021 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.644446 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:26:44.644412413 +0000 UTC m=+146.179526472 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.647287 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.672827 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f86c73a1-9c5e-4d01-856b-dce5ef6e0ae3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4972b65704958e549d665d88ee3470710096c4784f9872045274d8af63e2043d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e1f771251595f0de0a5ec1739ebe02a63de7d6a3cefdf90a42b48c115dab87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb786620c9d65d8acf9881ed0f98a94de9f39ca7eb6870377958061f4dc9af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://50ec1a0333d7c102879ec60e1262b8035b347cad7f9107959e49b4718b3a79db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://21c65f33d2b7b951ba4b6f87e7246aa6911ac9b7db7be966578213fc3041441f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"exitCode\\\":0,\\
\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.689566 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.710675 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.714682 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.714707 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.714718 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.714756 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 
15:25:40.714770 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.734695 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 06:45:27.513612563 +0000 UTC
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.737166 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.737321 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.737546 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.737606 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.737714 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.737761 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.740114 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.740520 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
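The "No sandbox for pod can be found" / "network is not ready" pairs repeat because the runtime keeps reporting NetworkReady=false until a CNI config file appears in /etc/kubernetes/cni/net.d/. A rough sketch of such a presence check (illustrative only; cniConfigPresent is a hypothetical helper, and the recognized extensions are conventional CNI ones, not a verified CRI-O list):

// Minimal sketch, not CRI-O/kubelet source: the kind of check behind
// "no CNI configuration file in /etc/kubernetes/cni/net.d/".
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether the directory holds at least one
// CNI network configuration (.conf, .conflist or .json by convention).
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// Mirrors the repeated condition in the entries above.
		fmt.Println("network is not ready: NetworkReady=false (no CNI configuration file)")
		return
	}
	fmt.Println("NetworkReady=true")
}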
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.744350 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.745119 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.745207 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.745239 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: 
\"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745442 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745480 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745496 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745509 5021 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745522 5021 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745530 5021 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745633 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-21 15:26:44.745596299 +0000 UTC m=+146.280710368 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745674 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-21 15:26:44.74566036 +0000 UTC m=+146.280774459 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.745573 5021 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: E0121 15:25:40.746314 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-21 15:26:44.746147084 +0000 UTC m=+146.281260973 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.759648 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.775486 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.793954 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.812033 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.818485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.818545 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.818560 5021 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.818579 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.818591 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.828795 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.845220 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.861340 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.880801 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:40Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.922309 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.922374 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.922389 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.922414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:40 crc kubenswrapper[5021]: I0121 15:25:40.922430 5021 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:40Z","lastTransitionTime":"2026-01-21T15:25:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.025701 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.025770 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.025784 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.025806 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.025823 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.129041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.129128 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.129140 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.129162 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.129181 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.232316 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.232380 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.232392 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.232414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.232428 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.335729 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.335799 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.335815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.335847 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.335896 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.438601 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.438654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.438666 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.438688 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.438702 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.541057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.541104 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.541113 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.541132 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.541147 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.617206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.617270 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.617284 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.617309 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.617322 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.631436 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.636608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.636654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.636669 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.636693 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.636708 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.652728 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.656945 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.656992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.657008 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.657033 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.657050 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.692605 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.697661 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.697715 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.697728 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.697750 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.697766 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.714779 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.719883 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.719963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.719976 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.719997 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.720010 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.735186 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 18:19:29.780626872 +0000 UTC Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.735512 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:41 crc kubenswrapper[5021]: E0121 15:25:41.735726 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.737953 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.737998 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.738008 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.738029 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.738044 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.846557 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.846615 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.846630 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.846654 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.846670 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.949441 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.949486 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.949496 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.949513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:41 crc kubenswrapper[5021]: I0121 15:25:41.949527 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:41Z","lastTransitionTime":"2026-01-21T15:25:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.052743 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.052778 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.052788 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.052805 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.052817 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.155985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.156057 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.156070 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.156091 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.156107 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.259687 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.259738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.259752 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.259773 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.259789 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.363322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.363378 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.363396 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.363415 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.363428 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.466576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.466623 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.466636 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.466651 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.466662 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.543734 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/3.log" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.545043 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/2.log" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.549157 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912" exitCode=1 Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.549233 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.549290 5021 scope.go:117] "RemoveContainer" containerID="6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.551455 5021 scope.go:117] "RemoveContainer" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912" Jan 21 15:25:42 crc kubenswrapper[5021]: E0121 15:25:42.551760 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.570209 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.570616 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.570708 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc 
kubenswrapper[5021]: I0121 15:25:42.570723 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.570744 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.570760 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.586953 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.601652 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.624656 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f86c73a1-9c5e-4d01-856b-dce5ef6e0ae3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4972b65704958e549d665d88ee3470710096c4784f9872045274d8af63e2043d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e1f771251595f0de0a5ec1739ebe02a63de7d6a3cefdf90a42b48c115dab87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"vol
umeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb786620c9d65d8acf9881ed0f98a94de9f39ca7eb6870377958061f4dc9af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://50ec1a0333d7c102879ec60e1262b8035b347cad7f9107959e49b4718b3a79db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://21c65f33d2b7b951ba4b6f87e7246aa6911ac9b7db7be966578213fc3041441f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"exitCode\\\":0,\\
\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.641246 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.657494 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674364 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674468 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674526 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674548 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.674562 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.690558 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.704958 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.721031 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.735412 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 21:52:27.04465838 +0000 UTC Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.735797 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.736793 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.736835 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.736848 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.736802 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:42 crc kubenswrapper[5021]: E0121 15:25:42.736948 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:42 crc kubenswrapper[5021]: E0121 15:25:42.737109 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:42 crc kubenswrapper[5021]: E0121 15:25:42.737175 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:42 crc kubenswrapper[5021]: E0121 15:25:42.737264 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.751137 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.770856 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.778495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.778563 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.778578 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.778600 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.778616 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.786406 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-
01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.805143 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea
3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.826568 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"rk=default are: map[]\\\\nI0121 15:25:41.146251 7130 lb_config.go:1031] Cluster endpoints for openshift-network-console/networking-console-plugin for 
network=default are: map[]\\\\nF0121 15:25:41.146225 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z]\\\\nI0121 15:25:41.146258 7130 services_controller.go:444] Built service openshift-apiserver-operator/metrics LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0121 15:25:41.146275 7130 services_controller.go:445] Built service openshift-apiserver-operator/metrics LB template configs for network=default: []services.lbC\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.841531 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.857068 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:42Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.881971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.882018 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.882033 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.882065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.882081 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.985884 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.985959 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.985978 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.986003 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:42 crc kubenswrapper[5021]: I0121 15:25:42.986017 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:42Z","lastTransitionTime":"2026-01-21T15:25:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.088764 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.088803 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.088826 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.088843 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.088851 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.191837 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.191884 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.191895 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.191942 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.191955 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.295515 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.295565 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.295576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.295594 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.295607 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.399045 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.399106 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.399118 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.399138 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.399151 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.501592 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.501652 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.501666 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.501690 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.501704 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.554808 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/3.log"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.605074 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.605146 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.605166 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.605200 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.605221 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.709168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.709222 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.709234 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.709256 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.709270 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.736212 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 16:26:16.478237727 +0000 UTC Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.812317 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.812388 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.812401 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.812426 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.812440 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.916039 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.916119 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.916135 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.916171 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:43 crc kubenswrapper[5021]: I0121 15:25:43.916188 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:43Z","lastTransitionTime":"2026-01-21T15:25:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.019281 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.019336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.019348 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.019367 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.019379 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.122327 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.122395 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.122409 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.122436 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.122456 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.225821 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.225881 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.225926 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.225948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.225963 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.328714 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.328771 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.328783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.328805 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.328819 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.432172 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.432233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.432245 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.432264 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.432276 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.535252 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.535325 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.535337 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.535357 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.535372 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.639333 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.639388 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.639402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.639429 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.639443 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.736979 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 21:36:07.91838534 +0000 UTC
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.737198 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.737198 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:44 crc kubenswrapper[5021]: E0121 15:25:44.737393 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.737335 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.737195 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:44 crc kubenswrapper[5021]: E0121 15:25:44.737553 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:44 crc kubenswrapper[5021]: E0121 15:25:44.737755 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:44 crc kubenswrapper[5021]: E0121 15:25:44.737855 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.741432 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.741476 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.741489 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.741507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.741522 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.844430 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.844483 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.844495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.844515 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.844531 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.947795 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.947854 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.947863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.947882 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:44 crc kubenswrapper[5021]: I0121 15:25:44.947894 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:44Z","lastTransitionTime":"2026-01-21T15:25:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.050820 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.050872 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.050883 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.050902 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.050935 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.154442 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.154488 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.154499 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.154515 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.154524 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.258157 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.258206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.258216 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.258233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.258242 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.361290 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.361386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.361404 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.361434 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.361453 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.464897 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.464997 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.465015 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.465041 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.465065 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.567457 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.567539 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.567557 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.567587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.567605 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.670638 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.670690 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.670702 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.670719 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.670733 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.737538 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 08:01:59.260232862 +0000 UTC Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.774233 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.774312 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.774327 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.774350 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.774362 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.878428 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.878484 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.878493 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.878513 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.878524 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.981880 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.981971 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.981985 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.982011 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:45 crc kubenswrapper[5021]: I0121 15:25:45.982036 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:45Z","lastTransitionTime":"2026-01-21T15:25:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.085621 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.085696 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.085711 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.085740 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.085758 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.189063 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.189126 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.189138 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.189156 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.189167 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.292217 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.292252 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.292262 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.292274 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.292283 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.395825 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.395878 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.395888 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.395953 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.395969 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.499411 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.499452 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.499462 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.499476 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.499485 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.604264 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.604336 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.604347 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.604370 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.604384 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.707322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.707375 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.707386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.707407 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.707420 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.737229 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.737234 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.737234 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.737260 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:46 crc kubenswrapper[5021]: E0121 15:25:46.737454 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:46 crc kubenswrapper[5021]: E0121 15:25:46.737735 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:46 crc kubenswrapper[5021]: E0121 15:25:46.737886 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:46 crc kubenswrapper[5021]: E0121 15:25:46.738071 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.738494 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 10:25:41.460622646 +0000 UTC
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.810128 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.810171 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.810183 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.810202 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.810216 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.914143 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.914202 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.914216 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.914236 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:46 crc kubenswrapper[5021]: I0121 15:25:46.914249 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:46Z","lastTransitionTime":"2026-01-21T15:25:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.017591 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.017958 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.018087 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.018204 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.018303 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.121088 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.121138 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.121155 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.121177 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.121190 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.224771 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.224835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.224845 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.224862 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.224872 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.328257 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.328318 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.328330 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.328354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.328369 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.431066 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.431416 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.431510 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.431594 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.431681 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.534835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.534883 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.534894 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.534932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.534945 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.637885 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.637953 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.637965 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.637982 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.637995 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.738802 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 19:24:18.695950259 +0000 UTC Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.740403 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.740529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.740610 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.740697 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.740781 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.843315 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.843377 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.843391 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.843408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.843422 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
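The certificate_manager line above logs a rotation deadline (2025-11-18) that already precedes the log's clock (2026-01-21), so rotation is due immediately, and the deadline is re-jittered on each pass (a second evaluation below picks 2025-12-21). A hedged Go sketch of how a client-go-style manager derives such a deadline; the 70-90% band and the issuance time are assumptions, not the verified client-go constants:

// Hedged sketch: jittered rotation-deadline computation, assuming the manager
// picks a random point late in the certificate's validity window.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	// Assumed: a random point between 70% and 90% of the total lifetime.
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // from the log line
	notBefore := notAfter.Add(-365 * 24 * time.Hour)          // assumed issuance
	d := rotationDeadline(notBefore, notAfter)
	fmt.Println("rotation deadline:", d)
	if time.Now().After(d) {
		fmt.Println("deadline already passed; manager should rotate now")
	}
}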
Has your network provider started?"} Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.946681 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.946755 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.946764 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.946783 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:47 crc kubenswrapper[5021]: I0121 15:25:47.946793 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:47Z","lastTransitionTime":"2026-01-21T15:25:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.049178 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.049229 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.049238 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.049255 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.049265 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.152139 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.152206 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.152221 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.152244 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.152262 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.255109 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.255529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.255612 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.255767 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.255862 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.359301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.359333 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.359342 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.359357 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.359384 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.462541 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.462604 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.462613 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.462635 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.462645 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.565485 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.565577 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.565593 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.565618 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.565642 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.668842 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.668922 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.668938 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.668963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.668988 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.737659 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:48 crc kubenswrapper[5021]: E0121 15:25:48.737817 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.737872 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.738013 5021 util.go:30] "No sandbox for pod can be found. 
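The "No sandbox for pod can be found. Need to start a new one" / "Error syncing pod, skipping" pairs show the other half of the CNI outage: pods that need a pod network cannot get a new sandbox until the runtime reports NetworkReady=true, while host-network pods are unaffected. A simplified Go sketch of that gate (types and field names are stand-ins, not kubelet internals):

// Hedged sketch of the gate behind "Error syncing pod, skipping".
package main

import (
	"errors"
	"fmt"
)

type pod struct {
	name        string
	hostNetwork bool
}

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

// canCreateSandbox: host-network pods may proceed; everything else is
// skipped until the CNI plugin is up.
func canCreateSandbox(p pod, networkReady bool) error {
	if p.hostNetwork || networkReady {
		return nil
	}
	return errNetworkNotReady
}

func main() {
	pods := []pod{
		{name: "openshift-multus/network-metrics-daemon-xtd2p", hostNetwork: false},
		{name: "openshift-etcd/etcd-crc", hostNetwork: true},
	}
	for _, p := range pods {
		if err := canCreateSandbox(p, false); err != nil {
			fmt.Printf("%s: %v\n", p.name, err)
			continue
		}
		fmt.Printf("%s: sandbox can start\n", p.name)
	}
}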
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:48 crc kubenswrapper[5021]: E0121 15:25:48.738154 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.738317 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:48 crc kubenswrapper[5021]: E0121 15:25:48.738470 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:48 crc kubenswrapper[5021]: E0121 15:25:48.738821 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.738976 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 16:35:51.96457052 +0000 UTC Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.760625 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f86c73a1-9c5e-4d01-856b-dce5ef6e0ae3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4972b65704958e549d665d88ee3470710096c4784f9872045274d8af63e2043d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e1f771251595f0de0a5ec1739ebe02a63de7d6a3cefdf90a42b48c115dab87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb786620c9d65d8acf9881ed0f98a94de9f39ca7eb6870377958061f4dc9af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://50ec1a0333d7c102879ec60e1262b8035b347ca
d7f9107959e49b4718b3a79db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://21c65f33d2b7b951ba4b6f87e7246aa6911ac9b7db7be966578213fc3041441f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b79fe3d315f973b92fcbd896ee2fe852b7e80893bd51df3f4802fe230421322f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a18ff277d97036930e36d290c3abae4a918cddb2353e40d5618332a9e9e6362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05695ff1eb5fa52521af079f1874dd650f22f233e0462daeeb9083142d5c3eee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.771540 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.771596 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.771608 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.771627 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.771640 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.775338 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97953e2c-3d85-44a4-9900-b3c53432cc90\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1f9f8b3712320f6149f9124f2c3d85a9957a64678d7f1d0a11728c00afc74cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4543419f7cc5df1a3788158c9b4540bb0e5320349e033d6a4a495267a84f5dd8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effb345da8dd5ec896b8b390b24c11d5fafc667b987d984ea5df7647068c4f46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.787777 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bdccebb0-daa8-4163-87a3-191bef44cf94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0650e3b31b409afa68b74eb066029d1ac832f14615432265ce968cbe9b89c78a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60ce022aa50e5962258046fb06f1008a15dab87927995d4308d0c2dc75fb9d9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e5bf8c1e3e1a89326ac8ad0bb8027da467c25709d34c16f81fd8915cc0cf76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19a3d19b378e54ad8ff325f81e065e19a20aad97b002393619e9165653d75a78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.803001 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.819896 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0aed55e8987f8148a0e729da9e5dcad94bc05e1441eb8e087e05d8b81da4d2df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.836653 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb60592c-6770-457b-b2ae-2c6c8f2a4149\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xbnsh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-xtd2p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.853366 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"stHeaderAuthRequestController\\\\nI0121 15:24:36.730187 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0121 15:24:36.730206 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0121 15:24:36.730225 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0121 15:24:36.730232 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0121 15:24:36.735520 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0121 15:24:36.735652 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735731 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0121 15:24:36.735750 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0121 15:24:36.735756 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0121 15:24:36.735765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0121 15:24:36.735770 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0121 15:24:36.737297 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0121 15:24:36.741633 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741680 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI0121 15:24:36.741699 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-1615556949/tls.crt::/tmp/serving-cert-1615556949/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769009060\\\\\\\\\\\\\\\" (2026-01-21 15:24:20 +0000 UTC to 2026-02-20 15:24:21 +0000 UTC (now=2026-01-21 15:24:36.741651328 +0000 UTC))\\\\\\\"\\\\nF0121 15:24:36.741699 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:20Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.869148 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2cec185f78f20cb42969ab7a1c67289c23ab5714e5c397b1fb5ee55540adcc80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.874583 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.874768 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.874979 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.875184 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.875360 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.883732 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:37Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://da2a92bf0daa0e03e89d2a5ff93137363faab6817894aadb90e424b35c878da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22e8a3282b80d888e687ba06911f278868d7f50c3f0c52745beebc66440deeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.895333 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vg9bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78b7c66b-9935-480c-bf2e-9109b6141006\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9036131b35dd8128019792e8c0100da93fec521e8b003b05f17196b85991d46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t65tk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vg9bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.912152 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-sd7j9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:29Z\\\",\\\"message\\\":\\\"2026-01-21T15:24:44+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b\\\\n2026-01-21T15:24:44+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c03d55e-f879-4039-b9c2-3221ee89824b to /host/opt/cni/bin/\\\\n2026-01-21T15:24:44Z [verbose] multus-daemon started\\\\n2026-01-21T15:24:44Z [verbose] Readiness Indicator file check\\\\n2026-01-21T15:25:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:25:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9rhz8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-sd7j9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.924270 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2265b990-9ae6-48a6-b93e-c91bf08a41f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7264e502c2112ab6e5c6ef790a042b5e3fd99d2f1fc117ca3c1245997cabdf59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://96d3eaf7bd96bfc2526f43d3a02a4d61c9ebfd3562ee0cbbe3a3fe19ddf2429c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wgbzj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-597dt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 
15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.940218 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.961243 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a2c749aca14ef61231e1298f1fca331c578ca6eb0ca298d8db61ac45db9a1f6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:12Z\\\",\\\"message\\\":\\\"-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901764 6701 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0121 15:25:12.901768 6701 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0121 15:25:12.901774 6701 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0121 15:25:12.901783 6701 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-k9hxg\\\\nF0121 15:25:12.901785 6701 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate ha\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:11Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-21T15:25:41Z\\\",\\\"message\\\":\\\"rk=default are: map[]\\\\nI0121 15:25:41.146251 7130 lb_config.go:1031] Cluster endpoints for openshift-network-console/networking-console-plugin for network=default are: map[]\\\\nF0121 15:25:41.146225 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-21T15:25:41Z is after 2025-08-24T17:21:41Z]\\\\nI0121 15:25:41.146258 7130 services_controller.go:444] Built service openshift-apiserver-operator/metrics LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0121 15:25:41.146275 7130 services_controller.go:445] Built service openshift-apiserver-operator/metrics LB template configs for network=default: []services.lbC\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-21T15:25:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.i
o/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5k67c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-9flhm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.974067 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-dq2bd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"67cee991-0227-45a5-bb2d-226481f03fd1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45a1ed486d5aef56ffd0f6ed50755a384ab94ce452ea9b4d51804fdca6eb179b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8fmw9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:45Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-dq2bd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.977835 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.977880 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.977896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.977936 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.977953 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:48Z","lastTransitionTime":"2026-01-21T15:25:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:48 crc kubenswrapper[5021]: I0121 15:25:48.986959 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:48Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.002877 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ddf892f9-a048-4335-995e-de581763d230\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d45ae12ddf6d1ac783e31194495a45834964d0379bda6db0655e0194e6f6273b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a81489b5664bd5e4c9fca933d66227f3adb864bf81fbef3ad986bf2ea6f71796\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00e45abe96af11ba56f0b5aa0684fc352630f6048c52ffc5182ac8c9ce22374c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7e44aea17b0a27d8d25e82e6985028d87f30934b25173cc23f4cec2da9f155\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f56fc36439d7c474bb273923ab295b1a247eea18b11233568bb6d1c579aa70f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd98efdf50845afb37e234bd527c4c945df5bf4b5e9b2774adb6a7cc67761b25\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34249e9db1c2f9f8a1d6f840f32de9cfa2b3f071de7a72784674e64607f513b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-21T15:24:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-21T15:24:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nk6v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-k9hxg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.016512 5021 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-21T15:24:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ce8d981292a75a56750a6c51bf6f523a6b29e0496d5b406455711ab49c4c81a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-21T15:24:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqh6z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-21T15:24:43Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-n22xz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:49Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.081175 5021 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.081231 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.081244 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.081261 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.081271 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.183668 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.183736 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.183747 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.183763 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.183774 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.286676 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.286713 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.286724 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.286739 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.286751 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.389816 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.389870 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.389886 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.389933 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.389952 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.493487 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.493542 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.493553 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.493574 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.493592 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.595813 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.595863 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.595932 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.595996 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.596010 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.699089 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.699147 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.699161 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.699181 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.699193 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.739669 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 05:38:05.782592817 +0000 UTC
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.753279 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.802024 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.802065 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.802075 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.802091 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.802102 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
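The certificate_manager.go:356 line above (and its repeats at 15:25:50 and 15:25:51 below) shows the kubelet-serving certificate with a fixed expiration of 2026-02-24 but a rotation deadline that changes on every attempt (2025-11-30, then 2025-12-05, then 2025-12-21). That is the jittered deadline computation in client-go's certificate manager, which picks a point at roughly 70 to 90 percent of the certificate's lifetime and re-randomizes when rotation cannot complete. The sketch below reproduces the idea; the exact fractions and the assumed notBefore are illustrative, not the library's constants.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline returns a point between notBefore and notAfter at a
// jittered fraction of the certificate lifetime, similar in spirit to
// client-go's certificate manager. The fractions are illustrative only.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64() // jitter in [0.7, 0.9)
	return notBefore.Add(time.Duration(frac * float64(lifetime)))
}

func main() {
	// Expiration taken from the log line above; the issue time is assumed.
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.Add(-90 * 24 * time.Hour) // assumed lifetime
	for i := 0; i < 3; i++ {
		fmt.Println("candidate rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}

Running this prints three different deadlines for the same certificate, which is exactly the pattern the three certificate_manager lines in this log show.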
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.904874 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.904951 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.904963 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.904979 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:49 crc kubenswrapper[5021]: I0121 15:25:49.904992 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:49Z","lastTransitionTime":"2026-01-21T15:25:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.007552 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.007590 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.007598 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.007610 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.007621 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.110112 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.110175 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.110189 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.110210 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.110223 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.212408 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.212514 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.212532 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.212551 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.212562 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.315022 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.315132 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.315148 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.315168 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.315183 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.417392 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.417447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.417458 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.417473 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.417482 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
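The condition= payload printed by setters.go:603 is the node's Ready condition as it will appear in the Node status. The canonical type is NodeCondition in k8s.io/api/core/v1; the local mirror below exists only so the example compiles with the standard library, with field names taken directly from the log lines above.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the JSON shape logged by setters.go:603 above.
// The real type is v1.NodeCondition; this stdlib-only copy is for illustration.
type nodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	now := time.Date(2026, 1, 21, 15, 25, 50, 0, time.UTC)
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false",
	}
	b, _ := json.Marshal(c)
	fmt.Println(string(b))
}

time.Time marshals to RFC 3339, which is why the log shows timestamps like "2026-01-21T15:25:50Z".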
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.520114 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.520157 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.520167 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.520183 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.520195 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.622738 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.622786 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.622800 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.622815 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.622832 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.724990 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.725030 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.725042 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.725059 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.725069 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
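Note the cadence: the same five-line NotReady cycle repeats roughly every 100 ms for several seconds. A small helper for reading a log like this one makes the pattern easy to quantify; the program below (name and approach are mine, not part of the kubelet) counts "Node became not ready" records per wall-clock second from stdin.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
)

// Counts "Node became not ready" records per second, e.g.:
//   go run notready_count.go < kubelet.log
// The regexp captures the HH:MM:SS part of the klog timestamp; FindAll
// handles archives where several entries ended up on one physical line.
func main() {
	re := regexp.MustCompile(`I\d{4} (\d{2}:\d{2}:\d{2})\.\d+ \d+ setters\.go:\d+\] "Node became not ready"`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines can be very long
	for sc.Scan() {
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[1]]++
		}
	}
	secs := make([]string, 0, len(counts))
	for s := range counts {
		secs = append(secs, s)
	}
	sort.Strings(secs)
	for _, s := range secs {
		fmt.Printf("%s  %d\n", s, counts[s])
	}
}

On this stretch of the log it would report about ten occurrences per second, which matches the node-status update loop retrying while the node stays NotReady.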
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.737594 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.737652 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.737620 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.737600 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:50 crc kubenswrapper[5021]: E0121 15:25:50.737742 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:50 crc kubenswrapper[5021]: E0121 15:25:50.737848 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:50 crc kubenswrapper[5021]: E0121 15:25:50.737968 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:50 crc kubenswrapper[5021]: E0121 15:25:50.738142 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
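The four pod_workers.go:1301 errors show the consequence of the NotReady condition: pods that need the cluster network cannot have their sandboxes created, so their sync is skipped, while host-network pods (such as the kube-rbac-proxy-crio-crc pod added via SyncLoop earlier) can still proceed. A deliberately simplified sketch of that gate follows; the function name and structure are illustrative, not the kubelet's actual runtime-state code, and the host-network attribution is an assumption.

package main

import (
	"errors"
	"fmt"
)

// errNetworkNotReady mimics the error text in the pod_workers lines above.
var errNetworkNotReady = errors.New("network is not ready: container runtime network not ready: NetworkReady=false")

// canSyncPod is a simplified version of the kubelet's gate: pods that need
// the cluster network are skipped until the runtime reports NetworkReady,
// while host-network pods may proceed. The real logic lives in the kubelet's
// runtime state checks; this only illustrates the shape of the decision.
func canSyncPod(networkReady, hostNetwork bool) error {
	if networkReady || hostNetwork {
		return nil
	}
	return errNetworkNotReady
}

func main() {
	pods := []struct {
		name        string
		hostNetwork bool
	}{
		{"openshift-multus/network-metrics-daemon-xtd2p", false},
		{"openshift-machine-config-operator/kube-rbac-proxy-crio-crc", true}, // assumed host-network
	}
	for _, p := range pods {
		if err := canSyncPod(false, p.hostNetwork); err != nil {
			fmt.Printf("Error syncing pod, skipping: %v pod=%q\n", err, p.name)
		} else {
			fmt.Printf("sync ok pod=%q\n", p.name)
		}
	}
}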
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.739989 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 14:13:41.574881491 +0000 UTC Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.827323 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.827362 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.827371 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.827386 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.827397 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.929841 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.929930 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.929941 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.929959 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:50 crc kubenswrapper[5021]: I0121 15:25:50.929969 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:50Z","lastTransitionTime":"2026-01-21T15:25:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.032816 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.032860 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.032875 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.032893 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.032939 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.135017 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.135077 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.135088 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.135106 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.135118 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.237475 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.237535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.237551 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.237569 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.237579 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.340468 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.340538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.340555 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.340573 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.340587 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.443246 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.443322 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.443334 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.443373 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.443389 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.545670 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.545721 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.545732 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.545748 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.545757 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
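A few lines further on, the node-status patches themselves start failing: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24, while the node clock reads 2026-01-21, so TLS verification rejects every patch. The stdlib sketch below performs the same NotAfter check that verification applies; connecting to the live endpoint is illustrative, and InsecureSkipVerify is used deliberately so the certificate dates can be read even though verification would fail.

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

// Connects to the webhook endpoint from the log and reports whether its
// serving certificate is valid for the current time.
func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject, cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339), now.After(cert.NotAfter))
	}
}

When now.After(cert.NotAfter) is true, verification fails exactly as in the "x509: certificate has expired or is not yet valid" errors recorded below.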
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.648483 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.648519 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.648528 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.648541 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.648554 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.740903 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 19:24:03.242624676 +0000 UTC
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.751482 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.751527 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.751546 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.751576 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.751592 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.854715 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.854785 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.854798 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.854817 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.854830 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.959000 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.959081 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.959099 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.959133 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:51 crc kubenswrapper[5021]: I0121 15:25:51.959163 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:51Z","lastTransitionTime":"2026-01-21T15:25:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.027547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.027636 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.027655 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.027686 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.027714 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.055150 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.060301 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.060341 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.060355 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.060379 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.060397 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.081639 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.086547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.086592 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.086603 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.086620 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.086631 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.099139 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.103460 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.103498 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.103507 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.103524 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.103535 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.116986 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.122118 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.122184 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.122201 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.122223 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.122241 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.136962 5021 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-21T15:25:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90fd653a-5482-4360-a078-7b7d7b2b9201\\\",\\\"systemUUID\\\":\\\"758eea6e-3652-403c-8df7-2cf690a0b7a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-21T15:25:52Z is after 2025-08-24T17:21:41Z" Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.137205 5021 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.139422 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.139484 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.139495 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.139516 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.139530 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.243275 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.243328 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.243344 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.243364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.243378 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.346362 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.346423 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.346435 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.346458 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.346473 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.449459 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.449525 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.449538 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.449598 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.449620 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.551449 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.551482 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.551492 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.551505 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.551514 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.653494 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.653533 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.653543 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.653557 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.653568 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.737794 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.737835 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.737861 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.737804 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.737951 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.738130 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.738211 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:25:52 crc kubenswrapper[5021]: E0121 15:25:52.738267 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.741102 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 09:30:50.494096978 +0000 UTC Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.755364 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.755393 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.755402 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.755414 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.755423 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.857339 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.857379 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.857447 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.857464 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.857476 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.959535 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.959610 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.959623 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.959637 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:25:52 crc kubenswrapper[5021]: I0121 15:25:52.959648 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:25:52Z","lastTransitionTime":"2026-01-21T15:25:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:25:53 crc kubenswrapper[5021]: I0121 15:25:53.741606 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 19:01:50.975644523 +0000 UTC
Jan 21 15:25:54 crc kubenswrapper[5021]: I0121 15:25:54.737765 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:54 crc kubenswrapper[5021]: E0121 15:25:54.737946 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:54 crc kubenswrapper[5021]: I0121 15:25:54.738019 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:54 crc kubenswrapper[5021]: I0121 15:25:54.738084 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:54 crc kubenswrapper[5021]: I0121 15:25:54.738145 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:54 crc kubenswrapper[5021]: E0121 15:25:54.738242 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:54 crc kubenswrapper[5021]: E0121 15:25:54.738534 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:54 crc kubenswrapper[5021]: E0121 15:25:54.738560 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:54 crc kubenswrapper[5021]: I0121 15:25:54.741802 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 18:47:29.640991836 +0000 UTC
Jan 21 15:25:55 crc kubenswrapper[5021]: I0121 15:25:55.744070 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 11:07:07.490242378 +0000 UTC
Jan 21 15:25:56 crc kubenswrapper[5021]: I0121 15:25:56.737692 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:56 crc kubenswrapper[5021]: I0121 15:25:56.737751 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:56 crc kubenswrapper[5021]: E0121 15:25:56.737850 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:56 crc kubenswrapper[5021]: I0121 15:25:56.737692 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:56 crc kubenswrapper[5021]: E0121 15:25:56.737983 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:56 crc kubenswrapper[5021]: I0121 15:25:56.738165 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:56 crc kubenswrapper[5021]: E0121 15:25:56.738259 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:56 crc kubenswrapper[5021]: E0121 15:25:56.738439 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:56 crc kubenswrapper[5021]: I0121 15:25:56.744684 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 22:13:39.791074024 +0000 UTC
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.738772 5021 scope.go:117] "RemoveContainer" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912"
Jan 21 15:25:57 crc kubenswrapper[5021]: E0121 15:25:57.739051 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.745306 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 10:15:21.549606055 +0000 UTC
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.758492 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-k9hxg" podStartSLOduration=75.7584751 podStartE2EDuration="1m15.7584751s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.75812469 +0000 UTC m=+99.293238589" watchObservedRunningTime="2026-01-21 15:25:57.7584751 +0000 UTC m=+99.293588979"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.783823 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podStartSLOduration=75.783771141 podStartE2EDuration="1m15.783771141s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.771016633 +0000 UTC m=+99.306130532" watchObservedRunningTime="2026-01-21 15:25:57.783771141 +0000 UTC m=+99.318885030"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.812064 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=23.812044994 podStartE2EDuration="23.812044994s" podCreationTimestamp="2026-01-21 15:25:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.809063553 +0000 UTC m=+99.344177442" watchObservedRunningTime="2026-01-21 15:25:57.812044994 +0000 UTC m=+99.347158883"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.846007 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=53.845989613 podStartE2EDuration="53.845989613s" podCreationTimestamp="2026-01-21 15:25:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.845193231 +0000 UTC m=+99.380307120" watchObservedRunningTime="2026-01-21 15:25:57.845989613 +0000 UTC m=+99.381103502"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.846246 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=81.846241999 podStartE2EDuration="1m21.846241999s" podCreationTimestamp="2026-01-21 15:24:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.827564349 +0000 UTC m=+99.362678228" watchObservedRunningTime="2026-01-21 15:25:57.846241999 +0000 UTC m=+99.381355888"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.892047 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=8.892031272 podStartE2EDuration="8.892031272s" podCreationTimestamp="2026-01-21 15:25:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.891544358 +0000 UTC m=+99.426658247" watchObservedRunningTime="2026-01-21 15:25:57.892031272 +0000 UTC m=+99.427145161"
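Each pod_startup_latency_tracker entry above reports podStartSLOduration as, in effect, observedRunningTime minus podCreationTimestamp (image-pull time would be excluded, but firstStartedPulling/lastFinishedPulling are the zero time here, i.e. no pull was observed). A quick check against the etcd-crc entry, as a sketch:

// sloduration.go - reproduce the etcd-crc startup duration from the log.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2026-01-21T15:25:34Z")     // podCreationTimestamp
	running, _ := time.Parse(time.RFC3339, "2026-01-21T15:25:57.809Z") // observedRunningTime
	fmt.Println(running.Sub(created)) // ~23.8s, in line with podStartSLOduration=23.812044994
}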
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.929949 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-vg9bt" podStartSLOduration=75.929884336 podStartE2EDuration="1m15.929884336s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.929567227 +0000 UTC m=+99.464681116" watchObservedRunningTime="2026-01-21 15:25:57.929884336 +0000 UTC m=+99.464998245"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.944595 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-sd7j9" podStartSLOduration=75.944572858 podStartE2EDuration="1m15.944572858s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.943970231 +0000 UTC m=+99.479084140" watchObservedRunningTime="2026-01-21 15:25:57.944572858 +0000 UTC m=+99.479686747"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.979754 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=81.979729999 podStartE2EDuration="1m21.979729999s" podCreationTimestamp="2026-01-21 15:24:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.979660067 +0000 UTC m=+99.514773956" watchObservedRunningTime="2026-01-21 15:25:57.979729999 +0000 UTC m=+99.514843898"
Jan 21 15:25:57 crc kubenswrapper[5021]: I0121 15:25:57.980015 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-597dt" podStartSLOduration=74.980008406 podStartE2EDuration="1m14.980008406s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:57.958061547 +0000 UTC m=+99.493175436" watchObservedRunningTime="2026-01-21 15:25:57.980008406 +0000 UTC m=+99.515122295"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.021977 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-dq2bd" podStartSLOduration=76.021956763 podStartE2EDuration="1m16.021956763s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:25:58.021668195 +0000 UTC m=+99.556782084" watchObservedRunningTime="2026-01-21 15:25:58.021956763 +0000 UTC m=+99.557070652"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.737623 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.738585 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:25:58 crc kubenswrapper[5021]: E0121 15:25:58.738585 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.738652 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.738660 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:25:58 crc kubenswrapper[5021]: E0121 15:25:58.738774 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:25:58 crc kubenswrapper[5021]: E0121 15:25:58.738949 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:25:58 crc kubenswrapper[5021]: E0121 15:25:58.738989 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:25:58 crc kubenswrapper[5021]: I0121 15:25:58.745743 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 22:04:45.824034872 +0000 UTC
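The message repeated in the entries above ("no CNI configuration file in /etc/kubernetes/cni/net.d/") is the container runtime's network-readiness gate: it reports NetworkReady=false until at least one CNI config file appears in the conf directory, which on this node cannot happen while ovnkube-controller is crash-looping. An illustrative stand-in for that check (the real one lives in the runtime's CNI plumbing, e.g. ocicni, so treat this shape as an assumption):

// cnicheck.go - rough illustration of the NetworkReady gate.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// networkReady reports whether the CNI conf dir contains at least one
// recognized configuration file (.conf, .conflist or .json).
func networkReady(confDir string) bool {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(networkReady("/etc/kubernetes/cni/net.d")) // false until the network plugin writes its config
}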
Jan 21 15:25:59 crc kubenswrapper[5021]: I0121 15:25:59.746472 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 08:55:06.921827247 +0000 UTC
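Note that every certificate_manager line in this excerpt reports the same expiration (2026-02-24 05:53:03) but a different rotation deadline, several of them already in the past at log time. That is expected: client-go's certificate manager re-jitters the deadline on each evaluation to a random point at roughly 70-90% of the certificate's validity window, and a past deadline simply means rotation is already due. A sketch modeled on that behavior, with an assumed one-year lifetime since notBefore is not in the log:

// rotation.go - sketch of the jittered rotation deadline.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in the 70-90% band of the
// certificate's validity, mirroring client-go's certificate manager.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z") // expiration from the log
	notBefore := notAfter.Add(-365 * 24 * time.Hour)                // assumed; not recorded in the log
	for i := 0; i < 3; i++ {
		fmt.Println(rotationDeadline(notBefore, notAfter)) // a different deadline each call
	}
}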
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.737381 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.737498 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.737597 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.737531 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.737708 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.737499 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.737834 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.737972 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.747633 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 17:40:19.518306909 +0000 UTC
Jan 21 15:26:00 crc kubenswrapper[5021]: I0121 15:26:00.862571 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.862721 5021 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 21 15:26:00 crc kubenswrapper[5021]: E0121 15:26:00.862802 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs podName:cb60592c-6770-457b-b2ae-2c6c8f2a4149 nodeName:}" failed. No retries permitted until 2026-01-21 15:27:04.862782019 +0000 UTC m=+166.397895908 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs") pod "network-metrics-daemon-xtd2p" (UID: "cb60592c-6770-457b-b2ae-2c6c8f2a4149") : object "openshift-multus"/"metrics-daemon-secret" not registered
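The failed metrics-certs mount above is parked for 1m4s ("No retries permitted until 15:27:04"). That interval matches the volume manager's per-operation exponential back-off: assuming the usual constants (500ms initial delay, doubled per consecutive failure, capped at about 2m2s; quoted from memory, not from this log), 1m4s corresponds to the eighth consecutive failure, 0.5s doubled seven times. A sketch of that schedule:

// volbackoff.go - sketch of the durationBeforeRetry schedule.
package main

import (
	"fmt"
	"time"
)

// durationBeforeRetry returns the wait after n consecutive failures:
// 500ms initial, doubled each failure, capped at 2m2s (assumed constants).
func durationBeforeRetry(n int) time.Duration {
	const (
		initial = 500 * time.Millisecond
		max     = 2*time.Minute + 2*time.Second
	)
	d := initial
	for i := 1; i < n; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	fmt.Println(durationBeforeRetry(8)) // 1m4s, matching the nestedpendingoperations entry above
}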
Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.748324 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 10:47:50.954426285 +0000 UTC
Has your network provider started?"} Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.908529 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.908575 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.908587 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.908607 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:01 crc kubenswrapper[5021]: I0121 15:26:01.908620 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:01Z","lastTransitionTime":"2026-01-21T15:26:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.010931 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.010989 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.011000 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.011019 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.011031 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.114042 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.114089 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.114101 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.114117 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.114129 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.216848 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.216896 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.216931 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.216948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.216959 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.318948 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.318992 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.319004 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.319021 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.319032 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.425824 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.426142 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.426278 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.426354 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.426426 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
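The KubeletNotReady condition above (repeated roughly every 100ms until 15:26:02) comes down to a single check: the runtime reports NetworkReady=false as long as no CNI configuration file exists under /etc/kubernetes/cni/net.d/. A minimal sketch of such a presence check, assuming the usual .conf/.conflist/.json extensions; the function names and extension list are illustrative, not kubelet's actual code:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"strings"
    )

    // cniConfigPresent reports whether confDir holds at least one CNI
    // config file; a missing directory counts as "no CNI configuration".
    func cniConfigPresent(confDir string) bool {
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		return false
    	}
    	for _, e := range entries {
    		switch strings.ToLower(filepath.Ext(e.Name())) {
    		case ".conf", ".conflist", ".json":
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	if !cniConfigPresent("/etc/kubernetes/cni/net.d") {
    		fmt.Println("NetworkReady=false: no CNI configuration file. Has your network provider started?")
    	}
    }

Once the network plugin writes its config into that directory, the same check flips to true and the Ready condition clears.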
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.504368 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.504719 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.504831 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.505290 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.505402 5021 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-21T15:26:02Z","lastTransitionTime":"2026-01-21T15:26:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.557580 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"]
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.557981 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.560173 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.560215 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.564658 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.569498 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.683377 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8526fea-87b2-428c-ba81-be13aff83cdd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.683560 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d8526fea-87b2-428c-ba81-be13aff83cdd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.683692 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.683817 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.683844 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8526fea-87b2-428c-ba81-be13aff83cdd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.737620 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.737674 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.737686 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:26:02 crc kubenswrapper[5021]: E0121 15:26:02.737775 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.737932 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 21 15:26:02 crc kubenswrapper[5021]: E0121 15:26:02.738183 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 21 15:26:02 crc kubenswrapper[5021]: E0121 15:26:02.738262 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:02 crc kubenswrapper[5021]: E0121 15:26:02.738330 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.749534 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 03:23:47.894879233 +0000 UTC Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.749599 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.759030 5021 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785154 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8526fea-87b2-428c-ba81-be13aff83cdd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785247 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785318 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8526fea-87b2-428c-ba81-be13aff83cdd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785359 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d8526fea-87b2-428c-ba81-be13aff83cdd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785425 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785485 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.785546 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/d8526fea-87b2-428c-ba81-be13aff83cdd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.787123 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d8526fea-87b2-428c-ba81-be13aff83cdd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.793837 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8526fea-87b2-428c-ba81-be13aff83cdd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.810148 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8526fea-87b2-428c-ba81-be13aff83cdd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bzx9x\" (UID: \"d8526fea-87b2-428c-ba81-be13aff83cdd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:02 crc kubenswrapper[5021]: I0121 15:26:02.873669 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" Jan 21 15:26:03 crc kubenswrapper[5021]: I0121 15:26:03.626636 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" event={"ID":"d8526fea-87b2-428c-ba81-be13aff83cdd","Type":"ContainerStarted","Data":"fcd3935ce8d4bc3ad25dff33d34fec6fe67c6fc80ebd2bdece5d2ccf24ba26f8"} Jan 21 15:26:04 crc kubenswrapper[5021]: I0121 15:26:04.630757 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" event={"ID":"d8526fea-87b2-428c-ba81-be13aff83cdd","Type":"ContainerStarted","Data":"916ff3ac9ded5a84fcdf52c90d2c4cfa2bc004c7e039f0feec947d6b9768bb19"} Jan 21 15:26:04 crc kubenswrapper[5021]: I0121 15:26:04.737469 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:04 crc kubenswrapper[5021]: I0121 15:26:04.737469 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:04 crc kubenswrapper[5021]: I0121 15:26:04.737883 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:04 crc kubenswrapper[5021]: I0121 15:26:04.738087 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:04 crc kubenswrapper[5021]: E0121 15:26:04.738242 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:04 crc kubenswrapper[5021]: E0121 15:26:04.738083 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:04 crc kubenswrapper[5021]: E0121 15:26:04.738251 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:04 crc kubenswrapper[5021]: E0121 15:26:04.738369 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:06 crc kubenswrapper[5021]: I0121 15:26:06.737798 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:06 crc kubenswrapper[5021]: I0121 15:26:06.737844 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:06 crc kubenswrapper[5021]: E0121 15:26:06.737954 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:06 crc kubenswrapper[5021]: I0121 15:26:06.738036 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:06 crc kubenswrapper[5021]: I0121 15:26:06.738039 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:06 crc kubenswrapper[5021]: E0121 15:26:06.738138 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:06 crc kubenswrapper[5021]: E0121 15:26:06.738335 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:06 crc kubenswrapper[5021]: E0121 15:26:06.738445 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:08 crc kubenswrapper[5021]: I0121 15:26:08.737059 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:08 crc kubenswrapper[5021]: I0121 15:26:08.737101 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:08 crc kubenswrapper[5021]: I0121 15:26:08.737215 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:08 crc kubenswrapper[5021]: I0121 15:26:08.737335 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:08 crc kubenswrapper[5021]: E0121 15:26:08.738589 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:08 crc kubenswrapper[5021]: E0121 15:26:08.739073 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:08 crc kubenswrapper[5021]: E0121 15:26:08.739221 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:08 crc kubenswrapper[5021]: E0121 15:26:08.739265 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:10 crc kubenswrapper[5021]: I0121 15:26:10.737599 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:10 crc kubenswrapper[5021]: I0121 15:26:10.737674 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:10 crc kubenswrapper[5021]: I0121 15:26:10.737891 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:10 crc kubenswrapper[5021]: E0121 15:26:10.738037 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:10 crc kubenswrapper[5021]: E0121 15:26:10.738297 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:10 crc kubenswrapper[5021]: E0121 15:26:10.738365 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:10 crc kubenswrapper[5021]: I0121 15:26:10.738481 5021 scope.go:117] "RemoveContainer" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912" Jan 21 15:26:10 crc kubenswrapper[5021]: I0121 15:26:10.738556 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:10 crc kubenswrapper[5021]: E0121 15:26:10.738597 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-9flhm_openshift-ovn-kubernetes(2e3f3965-4473-46d7-a613-2ed3e4b10ad7)\"" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" Jan 21 15:26:10 crc kubenswrapper[5021]: E0121 15:26:10.738687 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:12 crc kubenswrapper[5021]: I0121 15:26:12.737165 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:12 crc kubenswrapper[5021]: E0121 15:26:12.737278 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:12 crc kubenswrapper[5021]: I0121 15:26:12.737300 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:12 crc kubenswrapper[5021]: E0121 15:26:12.737375 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:12 crc kubenswrapper[5021]: I0121 15:26:12.737165 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:12 crc kubenswrapper[5021]: E0121 15:26:12.737453 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:12 crc kubenswrapper[5021]: I0121 15:26:12.737598 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:12 crc kubenswrapper[5021]: E0121 15:26:12.737648 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:14 crc kubenswrapper[5021]: E0121 15:26:14.392420 5021 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.655s" Jan 21 15:26:14 crc kubenswrapper[5021]: I0121 15:26:14.392945 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:14 crc kubenswrapper[5021]: I0121 15:26:14.392965 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:14 crc kubenswrapper[5021]: I0121 15:26:14.393074 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:14 crc kubenswrapper[5021]: E0121 15:26:14.393068 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:14 crc kubenswrapper[5021]: E0121 15:26:14.393153 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:14 crc kubenswrapper[5021]: I0121 15:26:14.393227 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:14 crc kubenswrapper[5021]: E0121 15:26:14.393408 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:14 crc kubenswrapper[5021]: E0121 15:26:14.393626 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.660353 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/1.log" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.661180 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/0.log" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.661224 5021 generic.go:334] "Generic (PLEG): container finished" podID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" containerID="9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3" exitCode=1 Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.661264 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerDied","Data":"9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3"} Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.661300 5021 scope.go:117] "RemoveContainer" containerID="ee3a703c230946ab5d32db22cafa6b8a8945566d53e5b97402adaf6701f3addd" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.661707 5021 scope.go:117] "RemoveContainer" containerID="9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3" Jan 21 15:26:15 crc kubenswrapper[5021]: E0121 15:26:15.661879 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-sd7j9_openshift-multus(49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a)\"" pod="openshift-multus/multus-sd7j9" podUID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.681029 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bzx9x" podStartSLOduration=93.68101023 podStartE2EDuration="1m33.68101023s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:04.645051555 +0000 UTC m=+106.180165444" watchObservedRunningTime="2026-01-21 15:26:15.68101023 +0000 UTC m=+117.216124129" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.737738 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:15 crc kubenswrapper[5021]: E0121 15:26:15.737857 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.738048 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:15 crc kubenswrapper[5021]: E0121 15:26:15.738112 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:15 crc kubenswrapper[5021]: I0121 15:26:15.738245 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:15 crc kubenswrapper[5021]: E0121 15:26:15.738332 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:16 crc kubenswrapper[5021]: I0121 15:26:16.666374 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/1.log" Jan 21 15:26:16 crc kubenswrapper[5021]: I0121 15:26:16.737168 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:16 crc kubenswrapper[5021]: E0121 15:26:16.737311 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:17 crc kubenswrapper[5021]: I0121 15:26:17.737320 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:17 crc kubenswrapper[5021]: I0121 15:26:17.737331 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:17 crc kubenswrapper[5021]: E0121 15:26:17.738247 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:17 crc kubenswrapper[5021]: I0121 15:26:17.737428 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:17 crc kubenswrapper[5021]: E0121 15:26:17.738303 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:17 crc kubenswrapper[5021]: E0121 15:26:17.738449 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:18 crc kubenswrapper[5021]: I0121 15:26:18.737260 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:18 crc kubenswrapper[5021]: E0121 15:26:18.738856 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:18 crc kubenswrapper[5021]: E0121 15:26:18.771812 5021 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 21 15:26:18 crc kubenswrapper[5021]: E0121 15:26:18.813058 5021 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 15:26:19 crc kubenswrapper[5021]: I0121 15:26:19.737336 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:19 crc kubenswrapper[5021]: I0121 15:26:19.737422 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:19 crc kubenswrapper[5021]: E0121 15:26:19.737465 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:19 crc kubenswrapper[5021]: I0121 15:26:19.737479 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:19 crc kubenswrapper[5021]: E0121 15:26:19.737565 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:19 crc kubenswrapper[5021]: E0121 15:26:19.737683 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:20 crc kubenswrapper[5021]: I0121 15:26:20.737664 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:20 crc kubenswrapper[5021]: E0121 15:26:20.737852 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:21 crc kubenswrapper[5021]: I0121 15:26:21.737473 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:21 crc kubenswrapper[5021]: I0121 15:26:21.737587 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:21 crc kubenswrapper[5021]: E0121 15:26:21.737625 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:21 crc kubenswrapper[5021]: E0121 15:26:21.737800 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:21 crc kubenswrapper[5021]: I0121 15:26:21.737861 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:21 crc kubenswrapper[5021]: E0121 15:26:21.737954 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:22 crc kubenswrapper[5021]: I0121 15:26:22.738207 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:22 crc kubenswrapper[5021]: E0121 15:26:22.738426 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:23 crc kubenswrapper[5021]: I0121 15:26:23.737206 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:23 crc kubenswrapper[5021]: I0121 15:26:23.737206 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:23 crc kubenswrapper[5021]: E0121 15:26:23.737399 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:23 crc kubenswrapper[5021]: I0121 15:26:23.737235 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:23 crc kubenswrapper[5021]: E0121 15:26:23.737655 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:23 crc kubenswrapper[5021]: E0121 15:26:23.737714 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:23 crc kubenswrapper[5021]: E0121 15:26:23.814430 5021 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 21 15:26:24 crc kubenswrapper[5021]: I0121 15:26:24.737082 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 21 15:26:24 crc kubenswrapper[5021]: E0121 15:26:24.737282 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 21 15:26:24 crc kubenswrapper[5021]: I0121 15:26:24.738086 5021 scope.go:117] "RemoveContainer" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912"
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.685968 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-xtd2p"]
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.686676 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:26:25 crc kubenswrapper[5021]: E0121 15:26:25.686809 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149"
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.696870 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/3.log"
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.701013 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerStarted","Data":"a5bb94789cb1dce31aaed81c325ae13132b10033281ae65f6ce37e91b74f6b8c"}
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.701965 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm"
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.736137 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podStartSLOduration=103.736108739 podStartE2EDuration="1m43.736108739s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:25.735578635 +0000 UTC m=+127.270692524" watchObservedRunningTime="2026-01-21 15:26:25.736108739 +0000 UTC m=+127.271222618"
Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.736826 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
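The podStartSLOduration in the entry above is plain arithmetic: watchObservedRunningTime minus podCreationTimestamp, i.e. 15:24:42 to 15:26:25.736108739 is about 103.736s, matching the logged 1m43.736s (the pull timestamps are zero because the image was never pulled). A minimal reproduction of the subtraction:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse(time.RFC3339, "2026-01-21T15:24:42Z")
    	observed, _ := time.Parse(time.RFC3339Nano, "2026-01-21T15:26:25.736108739Z")
    	// Prints 103.736108739, the podStartSLOduration from the log.
    	fmt.Println("podStartSLOduration =", observed.Sub(created).Seconds(), "s")
    }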
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:25 crc kubenswrapper[5021]: E0121 15:26:25.736998 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:25 crc kubenswrapper[5021]: I0121 15:26:25.737107 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:25 crc kubenswrapper[5021]: E0121 15:26:25.737229 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:26 crc kubenswrapper[5021]: I0121 15:26:26.737797 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:26 crc kubenswrapper[5021]: E0121 15:26:26.738023 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:27 crc kubenswrapper[5021]: I0121 15:26:27.737856 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:27 crc kubenswrapper[5021]: I0121 15:26:27.737961 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:27 crc kubenswrapper[5021]: I0121 15:26:27.737856 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:27 crc kubenswrapper[5021]: E0121 15:26:27.738093 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:27 crc kubenswrapper[5021]: E0121 15:26:27.738161 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:27 crc kubenswrapper[5021]: E0121 15:26:27.738241 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:28 crc kubenswrapper[5021]: I0121 15:26:28.737835 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:28 crc kubenswrapper[5021]: E0121 15:26:28.739270 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:28 crc kubenswrapper[5021]: E0121 15:26:28.815988 5021 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 15:26:29 crc kubenswrapper[5021]: I0121 15:26:29.736873 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:29 crc kubenswrapper[5021]: I0121 15:26:29.736922 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:29 crc kubenswrapper[5021]: E0121 15:26:29.737054 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:29 crc kubenswrapper[5021]: I0121 15:26:29.737029 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:29 crc kubenswrapper[5021]: E0121 15:26:29.737386 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:29 crc kubenswrapper[5021]: E0121 15:26:29.737481 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:29 crc kubenswrapper[5021]: I0121 15:26:29.737624 5021 scope.go:117] "RemoveContainer" containerID="9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3" Jan 21 15:26:30 crc kubenswrapper[5021]: I0121 15:26:30.720730 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/1.log" Jan 21 15:26:30 crc kubenswrapper[5021]: I0121 15:26:30.721330 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerStarted","Data":"40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f"} Jan 21 15:26:30 crc kubenswrapper[5021]: I0121 15:26:30.739502 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:30 crc kubenswrapper[5021]: E0121 15:26:30.740795 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:31 crc kubenswrapper[5021]: I0121 15:26:31.736814 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:31 crc kubenswrapper[5021]: I0121 15:26:31.736827 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:31 crc kubenswrapper[5021]: I0121 15:26:31.736873 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:31 crc kubenswrapper[5021]: E0121 15:26:31.737076 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:31 crc kubenswrapper[5021]: E0121 15:26:31.737187 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:31 crc kubenswrapper[5021]: E0121 15:26:31.737634 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:32 crc kubenswrapper[5021]: I0121 15:26:32.737762 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:32 crc kubenswrapper[5021]: E0121 15:26:32.737922 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 21 15:26:33 crc kubenswrapper[5021]: I0121 15:26:33.737608 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:33 crc kubenswrapper[5021]: I0121 15:26:33.737618 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:33 crc kubenswrapper[5021]: E0121 15:26:33.737807 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 21 15:26:33 crc kubenswrapper[5021]: E0121 15:26:33.738051 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 21 15:26:33 crc kubenswrapper[5021]: I0121 15:26:33.738225 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:33 crc kubenswrapper[5021]: E0121 15:26:33.738449 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-xtd2p" podUID="cb60592c-6770-457b-b2ae-2c6c8f2a4149" Jan 21 15:26:34 crc kubenswrapper[5021]: I0121 15:26:34.737532 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:34 crc kubenswrapper[5021]: I0121 15:26:34.740467 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 21 15:26:34 crc kubenswrapper[5021]: I0121 15:26:34.743127 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.736728 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.736974 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.737040 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.740354 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.740670 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.740858 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 21 15:26:35 crc kubenswrapper[5021]: I0121 15:26:35.741053 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.468547 5021 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.504795 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.505393 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.505559 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-lp8vp"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.506210 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.506463 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-g8wp8"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.507383 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.507644 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.507852 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.508992 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zf7sb"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.509750 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.512153 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.512672 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.520956 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.521388 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.521528 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.521823 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.522155 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.522389 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.522675 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.522985 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523090 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523196 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523283 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523396 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523495 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523613 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523717 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.523934 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524120 5021 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524265 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524314 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524406 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524597 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.524939 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.525188 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.525307 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.525339 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.525664 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526462 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526572 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526582 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526590 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526678 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526771 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526852 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526874 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.526965 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 21 15:26:43 crc 
kubenswrapper[5021]: I0121 15:26:43.526989 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527090 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527176 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527212 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527295 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527343 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.527429 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.530822 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.536548 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-cgt27"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.549846 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8c09c820-79c0-4e63-b063-2f01381c96fd-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.549952 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-trusted-ca\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.549991 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550023 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550051 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-etcd-serving-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550101 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-audit-dir\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550131 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550161 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8g55\" (UniqueName: \"kubernetes.io/projected/8880753d-187b-4b18-a292-43ed561b6d8d-kube-api-access-z8g55\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550191 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279af6f8-0575-4617-9391-8251ab3db6f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550232 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550264 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-config\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550294 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/680fb251-0f8f-4a72-a386-9ee555576980-machine-approver-tls\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550331 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52tkb\" (UniqueName: 
\"kubernetes.io/projected/279af6f8-0575-4617-9391-8251ab3db6f9-kube-api-access-52tkb\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550363 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550399 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279af6f8-0575-4617-9391-8251ab3db6f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550432 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-serving-cert\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550466 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-trusted-ca-bundle\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550498 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsrzh\" (UniqueName: \"kubernetes.io/projected/680fb251-0f8f-4a72-a386-9ee555576980-kube-api-access-qsrzh\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550542 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-serving-cert\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550604 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwjdn\" (UniqueName: 
\"kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550634 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550676 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550707 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550734 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-service-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550803 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-node-pullsecrets\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550839 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-etcd-client\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.550944 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551200 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-audit\") pod \"apiserver-76f77b778f-g8wp8\" (UID: 
\"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551238 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm8gf\" (UniqueName: \"kubernetes.io/projected/e72ac95a-ad13-408c-b595-9e983c185119-kube-api-access-vm8gf\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551311 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlxt8\" (UniqueName: \"kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551445 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2s99\" (UniqueName: \"kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551513 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551588 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551683 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-config\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551778 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551862 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zmgk\" (UniqueName: \"kubernetes.io/projected/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-kube-api-access-7zmgk\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.551963 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-auth-proxy-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552029 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552283 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-image-import-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552386 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552449 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prd8h\" (UniqueName: \"kubernetes.io/projected/8c09c820-79c0-4e63-b063-2f01381c96fd-kube-api-access-prd8h\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552486 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552633 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-encryption-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.552747 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8880753d-187b-4b18-a292-43ed561b6d8d-serving-cert\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " 
pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.555268 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.570878 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571347 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571400 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571575 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571661 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571666 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571999 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.576472 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571350 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.572015 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571296 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571213 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.579207 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.571258 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.580387 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.580651 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.580697 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.580774 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.581976 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bw5fp"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.582155 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.582280 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.582379 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.582542 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.582656 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.583074 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584295 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-vzlcs"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584308 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584346 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584560 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-lp8vp"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584609 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584614 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-vzlcs" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584743 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.584757 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.587404 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.589336 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-cvs5m"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.591951 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.593385 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.593416 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-cgt27"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.593531 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.598851 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zf7sb"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.600075 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.605160 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-g8wp8"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.605377 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.607857 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-cvs5m"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.608283 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609051 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609194 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609275 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609371 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609416 5021 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-session" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609450 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609576 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609672 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609801 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609805 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.609887 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610033 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610114 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610161 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610198 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610114 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610341 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610414 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.610497 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.611557 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.611669 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.611770 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.613011 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.617380 5021 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.617478 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.618795 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.618881 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.620061 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vzlcs"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.620933 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.623010 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bw5fp"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.624163 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.641106 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.646423 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.646557 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.647125 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.647440 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.649130 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.650175 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.654363 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.654622 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.654726 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.655798 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.655887 5021 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-config-operator"/"config-operator-serving-cert" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.656297 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.660046 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.660333 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj"] Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661503 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a62ba26d-f037-478b-8dd1-47ffb968b8a6-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661562 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8a36d28-fc2f-44fc-adca-fe218362ba3a-serving-cert\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661592 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prd8h\" (UniqueName: \"kubernetes.io/projected/8c09c820-79c0-4e63-b063-2f01381c96fd-kube-api-access-prd8h\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661644 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661666 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661693 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-encryption-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661750 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/d285a21e-c631-453d-9f22-cbaa397f714a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661796 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrv9p\" (UniqueName: \"kubernetes.io/projected/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-kube-api-access-mrv9p\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661825 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/549f101f-6acf-41be-9263-57bb5902cbd6-audit-dir\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661851 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661890 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8880753d-187b-4b18-a292-43ed561b6d8d-serving-cert\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661928 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661949 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661971 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8c09c820-79c0-4e63-b063-2f01381c96fd-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.661993 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-trusted-ca\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662009 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662034 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662056 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662077 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-etcd-serving-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662114 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-audit-dir\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662136 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-serving-cert\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662156 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvj75\" (UniqueName: \"kubernetes.io/projected/1eae8258-0ffa-4aad-9ac4-747259f4cae0-kube-api-access-zvj75\") pod \"downloads-7954f5f757-vzlcs\" (UID: \"1eae8258-0ffa-4aad-9ac4-747259f4cae0\") " pod="openshift-console/downloads-7954f5f757-vzlcs" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662177 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7vxc\" (UniqueName: \"kubernetes.io/projected/a62ba26d-f037-478b-8dd1-47ffb968b8a6-kube-api-access-d7vxc\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 
15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662201 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8g55\" (UniqueName: \"kubernetes.io/projected/8880753d-187b-4b18-a292-43ed561b6d8d-kube-api-access-z8g55\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662225 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279af6f8-0575-4617-9391-8251ab3db6f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662261 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662286 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662308 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-config\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662327 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-audit-policies\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662348 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/680fb251-0f8f-4a72-a386-9ee555576980-machine-approver-tls\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662371 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52tkb\" (UniqueName: \"kubernetes.io/projected/279af6f8-0575-4617-9391-8251ab3db6f9-kube-api-access-52tkb\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662392 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" 
(UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662412 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662436 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpj58\" (UniqueName: \"kubernetes.io/projected/d8a36d28-fc2f-44fc-adca-fe218362ba3a-kube-api-access-vpj58\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662454 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-serving-cert\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662473 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-trusted-ca-bundle\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662491 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-encryption-config\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662510 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279af6f8-0575-4617-9391-8251ab3db6f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662527 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-images\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662545 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: 
\"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662564 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662585 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsrzh\" (UniqueName: \"kubernetes.io/projected/680fb251-0f8f-4a72-a386-9ee555576980-kube-api-access-qsrzh\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662606 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662624 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-serving-cert\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662847 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwjdn\" (UniqueName: \"kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662871 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662889 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdt9f\" (UniqueName: \"kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662923 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662943 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662963 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-service-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662980 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.662998 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-node-pullsecrets\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663015 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-etcd-client\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663030 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-client\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663049 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663081 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663103 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"audit\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-audit\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663129 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm8gf\" (UniqueName: \"kubernetes.io/projected/e72ac95a-ad13-408c-b595-9e983c185119-kube-api-access-vm8gf\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663151 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlxt8\" (UniqueName: \"kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663187 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2s99\" (UniqueName: \"kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663211 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663230 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663279 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663304 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663327 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmvkd\" (UniqueName: \"kubernetes.io/projected/549f101f-6acf-41be-9263-57bb5902cbd6-kube-api-access-nmvkd\") pod 
\"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663342 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.663351 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6qjm\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-kube-api-access-t6qjm\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.664837 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.664961 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-audit-dir\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.664991 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e72ac95a-ad13-408c-b595-9e983c185119-node-pullsecrets\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665652 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-audit\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665711 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-config\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665739 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665760 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-config\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665782 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665802 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-metrics-tls\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665820 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d8a36d28-fc2f-44fc-adca-fe218362ba3a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665844 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d285a21e-c631-453d-9f22-cbaa397f714a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.665873 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zmgk\" (UniqueName: \"kubernetes.io/projected/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-kube-api-access-7zmgk\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666143 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666167 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666189 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-auth-proxy-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666207 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666225 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-image-import-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666241 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666260 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666294 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.666370 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-trusted-ca\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.667010 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.667036 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.667728 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config\") pod 
\"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.669113 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.671496 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.671513 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.671525 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.672366 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8880753d-187b-4b18-a292-43ed561b6d8d-config\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.672655 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/680fb251-0f8f-4a72-a386-9ee555576980-auth-proxy-config\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.673192 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.674425 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.674718 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 
crc kubenswrapper[5021]: I0121 15:26:43.675135 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-service-ca-bundle\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.675530 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.675854 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-trusted-ca-bundle\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.676077 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279af6f8-0575-4617-9391-8251ab3db6f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.677447 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-etcd-client\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.677740 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.677764 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8880753d-187b-4b18-a292-43ed561b6d8d-serving-cert\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.677753 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-config\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.678434 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert\") pod \"console-f9d7485db-92qbd\" (UID: 
\"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.678516 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.678833 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-serving-cert\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.681522 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/680fb251-0f8f-4a72-a386-9ee555576980-machine-approver-tls\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.681987 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279af6f8-0575-4617-9391-8251ab3db6f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.684259 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm8gf\" (UniqueName: \"kubernetes.io/projected/e72ac95a-ad13-408c-b595-9e983c185119-kube-api-access-vm8gf\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.691317 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e72ac95a-ad13-408c-b595-9e983c185119-encryption-config\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.696696 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsrzh\" (UniqueName: \"kubernetes.io/projected/680fb251-0f8f-4a72-a386-9ee555576980-kube-api-access-qsrzh\") pod \"machine-approver-56656f9798-4c5bh\" (UID: \"680fb251-0f8f-4a72-a386-9ee555576980\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.697289 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8c09c820-79c0-4e63-b063-2f01381c96fd-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.697766 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prd8h\" 
(UniqueName: \"kubernetes.io/projected/8c09c820-79c0-4e63-b063-2f01381c96fd-kube-api-access-prd8h\") pod \"cluster-samples-operator-665b6dd947-xcc8f\" (UID: \"8c09c820-79c0-4e63-b063-2f01381c96fd\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.698927 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.703174 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-serving-cert\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.721323 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwjdn\" (UniqueName: \"kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn\") pod \"controller-manager-879f6c89f-xlz6c\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.738118 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zmgk\" (UniqueName: \"kubernetes.io/projected/405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead-kube-api-access-7zmgk\") pod \"authentication-operator-69f744f599-lp8vp\" (UID: \"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.755940 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52tkb\" (UniqueName: \"kubernetes.io/projected/279af6f8-0575-4617-9391-8251ab3db6f9-kube-api-access-52tkb\") pod \"openshift-apiserver-operator-796bbdcf4f-khdzj\" (UID: \"279af6f8-0575-4617-9391-8251ab3db6f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.767783 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-serving-cert\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.767840 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvj75\" (UniqueName: \"kubernetes.io/projected/1eae8258-0ffa-4aad-9ac4-747259f4cae0-kube-api-access-zvj75\") pod \"downloads-7954f5f757-vzlcs\" (UID: \"1eae8258-0ffa-4aad-9ac4-747259f4cae0\") " pod="openshift-console/downloads-7954f5f757-vzlcs" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.767869 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7vxc\" (UniqueName: \"kubernetes.io/projected/a62ba26d-f037-478b-8dd1-47ffb968b8a6-kube-api-access-d7vxc\") pod 
\"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.767963 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-audit-policies\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768057 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768096 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpj58\" (UniqueName: \"kubernetes.io/projected/d8a36d28-fc2f-44fc-adca-fe218362ba3a-kube-api-access-vpj58\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768126 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-encryption-config\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768160 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-images\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768190 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768219 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768323 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdt9f\" (UniqueName: \"kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768355 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768384 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-client\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768418 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768527 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768558 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768621 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmvkd\" (UniqueName: \"kubernetes.io/projected/549f101f-6acf-41be-9263-57bb5902cbd6-kube-api-access-nmvkd\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768655 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6qjm\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-kube-api-access-t6qjm\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768687 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 
15:26:43.768713 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-config\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768740 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-metrics-tls\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768774 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d8a36d28-fc2f-44fc-adca-fe218362ba3a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768802 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d285a21e-c631-453d-9f22-cbaa397f714a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768838 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768863 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.768950 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769017 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a62ba26d-f037-478b-8dd1-47ffb968b8a6-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769041 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d8a36d28-fc2f-44fc-adca-fe218362ba3a-serving-cert\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769068 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769097 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d285a21e-c631-453d-9f22-cbaa397f714a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769121 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrv9p\" (UniqueName: \"kubernetes.io/projected/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-kube-api-access-mrv9p\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769147 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/549f101f-6acf-41be-9263-57bb5902cbd6-audit-dir\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769171 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769194 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769223 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.769263 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.770315 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.770394 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.770549 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-images\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.770785 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d8a36d28-fc2f-44fc-adca-fe218362ba3a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.770873 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-audit-policies\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.771892 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-serving-cert\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.772476 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.772610 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc 
kubenswrapper[5021]: I0121 15:26:43.772789 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.772888 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a62ba26d-f037-478b-8dd1-47ffb968b8a6-config\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.773364 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.771847 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.773793 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-metrics-tls\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.773866 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/549f101f-6acf-41be-9263-57bb5902cbd6-audit-dir\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.774475 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.775121 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.775124 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error\") pod 
\"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.776206 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8a36d28-fc2f-44fc-adca-fe218362ba3a-serving-cert\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.776824 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.776870 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d285a21e-c631-453d-9f22-cbaa397f714a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.777005 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.777139 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/a62ba26d-f037-478b-8dd1-47ffb968b8a6-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.777521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.777601 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlxt8\" (UniqueName: \"kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8\") pod \"route-controller-manager-6576b87f9c-qcz9k\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.777635 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: 
\"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.778641 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.797246 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2s99\" (UniqueName: \"kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99\") pod \"console-f9d7485db-92qbd\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.819157 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8g55\" (UniqueName: \"kubernetes.io/projected/8880753d-187b-4b18-a292-43ed561b6d8d-kube-api-access-z8g55\") pod \"console-operator-58897d9998-zf7sb\" (UID: \"8880753d-187b-4b18-a292-43ed561b6d8d\") " pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.825752 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.850688 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.859951 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvj75\" (UniqueName: \"kubernetes.io/projected/1eae8258-0ffa-4aad-9ac4-747259f4cae0-kube-api-access-zvj75\") pod \"downloads-7954f5f757-vzlcs\" (UID: \"1eae8258-0ffa-4aad-9ac4-747259f4cae0\") " pod="openshift-console/downloads-7954f5f757-vzlcs" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.879080 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7vxc\" (UniqueName: \"kubernetes.io/projected/a62ba26d-f037-478b-8dd1-47ffb968b8a6-kube-api-access-d7vxc\") pod \"machine-api-operator-5694c8668f-cgt27\" (UID: \"a62ba26d-f037-478b-8dd1-47ffb968b8a6\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.898827 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpj58\" (UniqueName: \"kubernetes.io/projected/d8a36d28-fc2f-44fc-adca-fe218362ba3a-kube-api-access-vpj58\") pod \"openshift-config-operator-7777fb866f-kn7v9\" (UID: \"d8a36d28-fc2f-44fc-adca-fe218362ba3a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.903277 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.913104 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.923560 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.950737 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.959600 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdt9f\" (UniqueName: \"kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f\") pod \"oauth-openshift-558db77b4-bw5fp\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.961386 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.972396 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.976412 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrv9p\" (UniqueName: \"kubernetes.io/projected/602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b-kube-api-access-mrv9p\") pod \"dns-operator-744455d44c-cvs5m\" (UID: \"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b\") " pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:43 crc kubenswrapper[5021]: I0121 15:26:43.996673 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmvkd\" (UniqueName: \"kubernetes.io/projected/549f101f-6acf-41be-9263-57bb5902cbd6-kube-api-access-nmvkd\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.016011 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.047099 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.057406 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.063056 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vzlcs" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.078365 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.671937 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-image-import-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.673672 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d285a21e-c631-453d-9f22-cbaa397f714a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.676520 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e72ac95a-ad13-408c-b595-9e983c185119-etcd-serving-ca\") pod \"apiserver-76f77b778f-g8wp8\" (UID: \"e72ac95a-ad13-408c-b595-9e983c185119\") " pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.676789 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-encryption-config\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.681994 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6qjm\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-kube-api-access-t6qjm\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.683355 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.683625 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.684366 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d285a21e-c631-453d-9f22-cbaa397f714a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-zld8h\" (UID: \"d285a21e-c631-453d-9f22-cbaa397f714a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:44 crc kubenswrapper[5021]: E0121 15:26:44.684384 5021 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:28:46.684363289 +0000 UTC m=+268.219477348 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.688651 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/549f101f-6acf-41be-9263-57bb5902cbd6-etcd-client\") pod \"apiserver-7bbb656c7d-wlk6l\" (UID: \"549f101f-6acf-41be-9263-57bb5902cbd6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.688660 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.734095 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-vpddf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.735051 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.739879 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:26:44 crc kubenswrapper[5021]: W0121 15:26:44.741602 5021 reflector.go:561] object-"openshift-dns"/"dns-default": failed to list *v1.ConfigMap: configmaps "dns-default" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 21 15:26:44 crc kubenswrapper[5021]: E0121 15:26:44.741683 5021 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"dns-default\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"dns-default\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 15:26:44 crc kubenswrapper[5021]: W0121 15:26:44.741757 5021 reflector.go:561] object-"openshift-dns"/"dns-default-metrics-tls": failed to list *v1.Secret: secrets "dns-default-metrics-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 21 15:26:44 crc kubenswrapper[5021]: E0121 15:26:44.741775 5021 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"dns-default-metrics-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"dns-default-metrics-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 15:26:44 crc kubenswrapper[5021]: W0121 15:26:44.741830 5021 reflector.go:561] object-"openshift-dns"/"dns-dockercfg-jwfmh": failed to list *v1.Secret: secrets "dns-dockercfg-jwfmh" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 21 15:26:44 crc kubenswrapper[5021]: E0121 15:26:44.741845 5021 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"dns-dockercfg-jwfmh\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"dns-dockercfg-jwfmh\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.750093 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.756448 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.759116 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.779665 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.784970 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.785021 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wctv7\" (UniqueName: \"kubernetes.io/projected/f384f01c-b331-4051-aef7-8da3fbbad2ab-kube-api-access-wctv7\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.785057 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.785075 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.785106 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.785124 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.789407 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:44 crc kubenswrapper[5021]: 
I0121 15:26:44.789436 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.791290 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.804771 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.805616 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qfmg"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.805889 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.805983 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.806282 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.806331 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.806520 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.807744 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.811929 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.812430 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.812622 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.812785 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.813262 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.813527 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-4459k"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.813801 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.814457 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.814604 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.814717 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.814783 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815077 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815183 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815363 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815373 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815522 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815676 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815762 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.815818 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.816220 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gm5rm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 
15:26:44.816682 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.817108 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.817598 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.817651 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.817700 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.817736 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.819859 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.819977 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.820015 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.820048 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.831737 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.832616 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.832943 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.836656 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.851213 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.851569 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.859035 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.859353 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.859801 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.859975 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860131 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860298 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860390 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860437 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860611 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860682 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-skc42"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860853 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.860973 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.861233 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.861553 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.861832 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862072 5021 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862102 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862255 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862319 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862357 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862479 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.862648 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.861703 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.863351 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.863747 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.868136 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.874561 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.874826 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.883289 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.886948 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-hkbvp"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897515 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897574 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897643 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wctv7\" (UniqueName: \"kubernetes.io/projected/f384f01c-b331-4051-aef7-8da3fbbad2ab-kube-api-access-wctv7\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897662 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897690 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj5gj\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897723 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897739 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897781 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897806 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897847 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " 
pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.897877 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.898400 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:44 crc kubenswrapper[5021]: E0121 15:26:44.898590 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.398554276 +0000 UTC m=+146.933668165 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.900661 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkmbf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.905653 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.906120 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.906933 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-xq97s"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.907457 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.907493 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.907598 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.908067 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.908634 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.909007 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.910404 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.914410 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.914435 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vpddf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.914447 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.917835 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.917867 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.919417 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.922308 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gm5rm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.925226 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-skc42"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.925280 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.926783 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.927374 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.932017 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.938327 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.938741 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.943285 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.946950 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.948918 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.950519 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.951572 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.952789 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-xq97s"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.955629 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.960801 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.963331 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.964022 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.964208 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.965646 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qfmg"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.971084 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkmbf"] Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.985090 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999514 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999764 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5642370-ee34-4ee6-8ae8-0951768da987-proxy-tls\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999790 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pphp\" (UniqueName: \"kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999810 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-default-certificate\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999825 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-stats-auth\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999843 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7146e72d-adb4-4283-aad0-8ed4b6363be9-config\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999861 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0654cecd-38e1-4678-9452-5e8b8b1dd07f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:44 crc kubenswrapper[5021]: I0121 15:26:44.999879 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13da1899-cf91-483d-99ea-7c6aa673e4f7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:44.999895 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-webhook-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:44.999935 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v4mq\" (UniqueName: \"kubernetes.io/projected/af171da1-a5e5-4811-8621-3acb113bb571-kube-api-access-4v4mq\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:44.999955 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:44.999970 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-config\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:44.999986 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-client\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000002 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-images\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: 
\"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000017 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000059 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000086 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-278gw\" (UniqueName: \"kubernetes.io/projected/ad524c85-51c3-47c1-b649-eca2cacdf4a6-kube-api-access-278gw\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000117 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000152 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtkpx\" (UniqueName: \"kubernetes.io/projected/f90d4209-f294-478a-85fd-42c8e91bd6aa-kube-api-access-xtkpx\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000175 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af171da1-a5e5-4811-8621-3acb113bb571-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000206 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000313 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-bound-sa-token\") pod 
\"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000351 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6ktr\" (UniqueName: \"kubernetes.io/projected/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-kube-api-access-f6ktr\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000377 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-cabundle\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000404 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-serving-cert\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000445 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c11241f4-6831-47d6-b5de-e8da9ccf7cad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.000490 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.500444692 +0000 UTC m=+147.035558591 (durationBeforeRetry 500ms). 
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c490e95d-e462-45b2-8352-9603283319e1-service-ca-bundle\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000674 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000718 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13da1899-cf91-483d-99ea-7c6aa673e4f7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000745 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvnp4\" (UniqueName: \"kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000773 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7146e72d-adb4-4283-aad0-8ed4b6363be9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000792 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f90d4209-f294-478a-85fd-42c8e91bd6aa-serving-cert\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000820 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000859 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8ca0f13c-b009-400f-94cd-f50d9209eb6c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000940 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d5503fc-0527-456d-b97d-7a455bdf3e7f-trusted-ca\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.000986 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-key\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001022 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-metrics-certs\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001056 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b7r7\" (UniqueName: \"kubernetes.io/projected/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-kube-api-access-5b7r7\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001087 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jtxp\" (UniqueName: \"kubernetes.io/projected/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-kube-api-access-8jtxp\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001125 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjdt5\" (UniqueName: \"kubernetes.io/projected/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-kube-api-access-gjdt5\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001158 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001193 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-plugins-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001216 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfcpf\" (UniqueName: \"kubernetes.io/projected/d7b4ba4d-dc1d-4720-81f3-57059d529def-kube-api-access-kfcpf\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001251 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mqdr\" (UniqueName: \"kubernetes.io/projected/c490e95d-e462-45b2-8352-9603283319e1-kube-api-access-5mqdr\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001277 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f90d4209-f294-478a-85fd-42c8e91bd6aa-config\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001301 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001324 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-srv-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001350 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-profile-collector-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001389 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001415 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbrn8\" (UniqueName: \"kubernetes.io/projected/da0f51c3-e6b4-49ae-8286-500e3ff30211-kube-api-access-bbrn8\") pod \"migrator-59844c95c7-hd2pm\" (UID: \"da0f51c3-e6b4-49ae-8286-500e3ff30211\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001456 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-socket-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001518 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001545 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-registration-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-mountpoint-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001601 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-csi-data-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001623 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001648 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af171da1-a5e5-4811-8621-3acb113bb571-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001680 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8fb86530-c6d5-4e68-960b-2eadac1c8973-tmpfs\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001710 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj5gj\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001734 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzrjh\" (UniqueName: \"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-kube-api-access-rzrjh\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001769 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7146e72d-adb4-4283-aad0-8ed4b6363be9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001790 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg6z6\" (UniqueName: \"kubernetes.io/projected/8ca0f13c-b009-400f-94cd-f50d9209eb6c-kube-api-access-rg6z6\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001811 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4d5503fc-0527-456d-b97d-7a455bdf3e7f-metrics-tls\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.001967 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.002029 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.002512 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.50249088 +0000 UTC m=+147.037604949 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.002927 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003141 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-config\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003190 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slw58\" (UniqueName: \"kubernetes.io/projected/d5642370-ee34-4ee6-8ae8-0951768da987-kube-api-access-slw58\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003222 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256b478f-1967-4699-857d-bc41f69654f2-proxy-tls\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003240 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1530383e-f6f4-47de-8302-dfe172a883e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003314 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-apiservice-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"
Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003336 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-certs\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp"
\"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-certs\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003370 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-srv-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003390 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c11241f4-6831-47d6-b5de-e8da9ccf7cad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003449 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003486 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003506 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-node-bootstrap-token\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.003627 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c11241f4-6831-47d6-b5de-e8da9ccf7cad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.007803 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5642370-ee34-4ee6-8ae8-0951768da987-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.007891 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4c8x2\" (UniqueName: 
\"kubernetes.io/projected/1530383e-f6f4-47de-8302-dfe172a883e7-kube-api-access-4c8x2\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008001 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ftnh\" (UniqueName: \"kubernetes.io/projected/13da1899-cf91-483d-99ea-7c6aa673e4f7-kube-api-access-9ftnh\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008058 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bdw9\" (UniqueName: \"kubernetes.io/projected/8fb86530-c6d5-4e68-960b-2eadac1c8973-kube-api-access-9bdw9\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008100 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008106 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008143 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tt9x\" (UniqueName: \"kubernetes.io/projected/256b478f-1967-4699-857d-bc41f69654f2-kube-api-access-2tt9x\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008346 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76kmh\" (UniqueName: \"kubernetes.io/projected/0654cecd-38e1-4678-9452-5e8b8b1dd07f-kube-api-access-76kmh\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008426 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-service-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.008426 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: 
\"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.024506 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.026124 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.028264 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.044111 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.051172 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.063225 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.067345 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.083741 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.103225 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.109734 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110077 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d5503fc-0527-456d-b97d-7a455bdf3e7f-trusted-ca\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110114 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8ca0f13c-b009-400f-94cd-f50d9209eb6c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110143 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-key\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110169 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-metrics-certs\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110202 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b7r7\" (UniqueName: \"kubernetes.io/projected/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-kube-api-access-5b7r7\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110233 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jtxp\" (UniqueName: \"kubernetes.io/projected/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-kube-api-access-8jtxp\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110270 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjdt5\" (UniqueName: 
\"kubernetes.io/projected/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-kube-api-access-gjdt5\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110306 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z62q\" (UniqueName: \"kubernetes.io/projected/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-kube-api-access-7z62q\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110367 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-plugins-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110394 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfcpf\" (UniqueName: \"kubernetes.io/projected/d7b4ba4d-dc1d-4720-81f3-57059d529def-kube-api-access-kfcpf\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110417 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f90d4209-f294-478a-85fd-42c8e91bd6aa-config\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110444 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110538 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-srv-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110571 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mqdr\" (UniqueName: \"kubernetes.io/projected/c490e95d-e462-45b2-8352-9603283319e1-kube-api-access-5mqdr\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110607 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-profile-collector-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110651 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbrn8\" (UniqueName: \"kubernetes.io/projected/da0f51c3-e6b4-49ae-8286-500e3ff30211-kube-api-access-bbrn8\") pod \"migrator-59844c95c7-hd2pm\" (UID: \"da0f51c3-e6b4-49ae-8286-500e3ff30211\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110682 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-socket-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110728 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-registration-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110757 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-mountpoint-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110782 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-csi-data-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110838 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110864 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af171da1-a5e5-4811-8621-3acb113bb571-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110890 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8fb86530-c6d5-4e68-960b-2eadac1c8973-tmpfs\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110941 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzrjh\" (UniqueName: 
\"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-kube-api-access-rzrjh\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.110970 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7146e72d-adb4-4283-aad0-8ed4b6363be9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111000 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg6z6\" (UniqueName: \"kubernetes.io/projected/8ca0f13c-b009-400f-94cd-f50d9209eb6c-kube-api-access-rg6z6\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111026 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4d5503fc-0527-456d-b97d-7a455bdf3e7f-metrics-tls\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111090 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-config\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111130 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slw58\" (UniqueName: \"kubernetes.io/projected/d5642370-ee34-4ee6-8ae8-0951768da987-kube-api-access-slw58\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111157 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256b478f-1967-4699-857d-bc41f69654f2-proxy-tls\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111182 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1530383e-f6f4-47de-8302-dfe172a883e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111210 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-apiservice-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111237 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-certs\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111275 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c11241f4-6831-47d6-b5de-e8da9ccf7cad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111302 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-srv-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111325 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111354 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-node-bootstrap-token\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.111404 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.611363234 +0000 UTC m=+147.146477293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111483 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c11241f4-6831-47d6-b5de-e8da9ccf7cad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111550 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-socket-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111564 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-plugins-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111602 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5642370-ee34-4ee6-8ae8-0951768da987-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111634 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-registration-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111672 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4c8x2\" (UniqueName: \"kubernetes.io/projected/1530383e-f6f4-47de-8302-dfe172a883e7-kube-api-access-4c8x2\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111685 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-mountpoint-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111730 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ftnh\" (UniqueName: 
\"kubernetes.io/projected/13da1899-cf91-483d-99ea-7c6aa673e4f7-kube-api-access-9ftnh\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111766 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-csi-data-dir\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111777 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bdw9\" (UniqueName: \"kubernetes.io/projected/8fb86530-c6d5-4e68-960b-2eadac1c8973-kube-api-access-9bdw9\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111808 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111834 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tt9x\" (UniqueName: \"kubernetes.io/projected/256b478f-1967-4699-857d-bc41f69654f2-kube-api-access-2tt9x\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111860 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76kmh\" (UniqueName: \"kubernetes.io/projected/0654cecd-38e1-4678-9452-5e8b8b1dd07f-kube-api-access-76kmh\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111885 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-service-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111905 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5642370-ee34-4ee6-8ae8-0951768da987-proxy-tls\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111958 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pphp\" (UniqueName: \"kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp\") pod 
\"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.111995 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-default-certificate\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112018 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-stats-auth\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112044 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0654cecd-38e1-4678-9452-5e8b8b1dd07f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112064 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7146e72d-adb4-4283-aad0-8ed4b6363be9-config\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112080 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13da1899-cf91-483d-99ea-7c6aa673e4f7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112094 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-webhook-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112114 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v4mq\" (UniqueName: \"kubernetes.io/projected/af171da1-a5e5-4811-8621-3acb113bb571-kube-api-access-4v4mq\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112139 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112155 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-config\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112172 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-client\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112191 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-images\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112206 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112241 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-278gw\" (UniqueName: \"kubernetes.io/projected/ad524c85-51c3-47c1-b649-eca2cacdf4a6-kube-api-access-278gw\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112259 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112287 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-cert\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112313 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtkpx\" (UniqueName: \"kubernetes.io/projected/f90d4209-f294-478a-85fd-42c8e91bd6aa-kube-api-access-xtkpx\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112329 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/af171da1-a5e5-4811-8621-3acb113bb571-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112352 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112371 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112397 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6ktr\" (UniqueName: \"kubernetes.io/projected/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-kube-api-access-f6ktr\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112416 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-cabundle\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112441 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-serving-cert\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112457 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c11241f4-6831-47d6-b5de-e8da9ccf7cad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112537 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c490e95d-e462-45b2-8352-9603283319e1-service-ca-bundle\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112574 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112610 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13da1899-cf91-483d-99ea-7c6aa673e4f7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112628 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8fb86530-c6d5-4e68-960b-2eadac1c8973-tmpfs\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112636 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvnp4\" (UniqueName: \"kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112660 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7146e72d-adb4-4283-aad0-8ed4b6363be9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112679 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f90d4209-f294-478a-85fd-42c8e91bd6aa-serving-cert\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112697 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.112737 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d5642370-ee34-4ee6-8ae8-0951768da987-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.120423 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-srv-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.123679 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.126753 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7146e72d-adb4-4283-aad0-8ed4b6363be9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.127186 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-metrics-certs\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.131841 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-service-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.140444 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af171da1-a5e5-4811-8621-3acb113bb571-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.142777 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.642741624 +0000 UTC m=+147.177855513 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.145207 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13da1899-cf91-483d-99ea-7c6aa673e4f7-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.145307 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-auth-proxy-config\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.151984 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d5642370-ee34-4ee6-8ae8-0951768da987-proxy-tls\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.156167 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7146e72d-adb4-4283-aad0-8ed4b6363be9-config\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.157087 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-webhook-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.158284 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.159568 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.171113 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/256b478f-1967-4699-857d-bc41f69654f2-images\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.171457 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d7b4ba4d-dc1d-4720-81f3-57059d529def-profile-collector-cert\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.171458 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8fb86530-c6d5-4e68-960b-2eadac1c8973-apiservice-cert\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.172070 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.173730 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c11241f4-6831-47d6-b5de-e8da9ccf7cad-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.173805 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13da1899-cf91-483d-99ea-7c6aa673e4f7-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.174808 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-default-certificate\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.176522 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.177560 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-profile-collector-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.178306 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c490e95d-e462-45b2-8352-9603283319e1-service-ca-bundle\") pod \"router-default-5444994796-4459k\" 
(UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.185656 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.193601 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256b478f-1967-4699-857d-bc41f69654f2-proxy-tls\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.203418 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.208417 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c490e95d-e462-45b2-8352-9603283319e1-stats-auth\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.217760 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.218256 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-cert\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.218328 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.718292003 +0000 UTC m=+147.253405892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.218519 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.218712 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z62q\" (UniqueName: \"kubernetes.io/projected/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-kube-api-access-7z62q\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.219480 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.71924371 +0000 UTC m=+147.254357769 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.229279 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.244491 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.260981 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-config\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.261531 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.261692 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-serving-cert\") pod \"etcd-operator-b45778765-6qfmg\" (UID: 
\"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.261755 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c11241f4-6831-47d6-b5de-e8da9ccf7cad-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.261871 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-ca\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.262834 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.266839 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-etcd-client\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.275832 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.285278 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.298865 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4d5503fc-0527-456d-b97d-7a455bdf3e7f-metrics-tls\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.302918 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.320038 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.320449 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.820406976 +0000 UTC m=+147.355520865 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.320614 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.321811 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.821774514 +0000 UTC m=+147.356888403 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.324653 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.349731 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.351891 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d5503fc-0527-456d-b97d-7a455bdf3e7f-trusted-ca\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.363049 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.383155 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.412197 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.419047 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-config\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:45 crc 
kubenswrapper[5021]: I0121 15:26:45.421764 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.422508 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:45.922491047 +0000 UTC m=+147.457604936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.424955 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.438985 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0654cecd-38e1-4678-9452-5e8b8b1dd07f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.443955 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.462424 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.482099 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.487246 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af171da1-a5e5-4811-8621-3acb113bb571-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.504739 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.523092 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.523492 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.524196 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.024184477 +0000 UTC m=+147.559298366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.546226 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.561223 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/8ca0f13c-b009-400f-94cd-f50d9209eb6c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.563632 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.573363 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-srv-cert\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.583380 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.623203 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.625589 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.625728 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.125707324 +0000 UTC m=+147.660821213 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.625876 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.626202 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.126193917 +0000 UTC m=+147.661307806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.642466 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.665694 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1530383e-f6f4-47de-8302-dfe172a883e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.666579 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.667066 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f90d4209-f294-478a-85fd-42c8e91bd6aa-serving-cert\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.688041 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.692419 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.700115 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console-operator/console-operator-58897d9998-zf7sb"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.705093 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 21 15:26:45 crc kubenswrapper[5021]: W0121 15:26:45.709569 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8880753d_187b_4b18_a292_43ed561b6d8d.slice/crio-8f354eef16292809d14e0e07c92f7c3bf27b0abd1f8fe3561675dfdcdbcd8ddc WatchSource:0}: Error finding container 8f354eef16292809d14e0e07c92f7c3bf27b0abd1f8fe3561675dfdcdbcd8ddc: Status 404 returned error can't find the container with id 8f354eef16292809d14e0e07c92f7c3bf27b0abd1f8fe3561675dfdcdbcd8ddc Jan 21 15:26:45 crc kubenswrapper[5021]: W0121 15:26:45.710873 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8a36d28_fc2f_44fc_adca_fe218362ba3a.slice/crio-2f49e8587729bfff3ebd731c977ddf457805dd45883a9e80eec67cd178233688 WatchSource:0}: Error finding container 2f49e8587729bfff3ebd731c977ddf457805dd45883a9e80eec67cd178233688: Status 404 returned error can't find the container with id 2f49e8587729bfff3ebd731c977ddf457805dd45883a9e80eec67cd178233688 Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.718798 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.720720 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-key\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.725194 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.726475 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.726834 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.226805927 +0000 UTC m=+147.761919836 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.727078 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.727420 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.227408644 +0000 UTC m=+147.762522533 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.729955 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vzlcs"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.744610 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.746030 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-signing-cabundle\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.751778 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.753716 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.761680 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-lp8vp"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.762809 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.786095 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.795160 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" event={"ID":"d8a36d28-fc2f-44fc-adca-fe218362ba3a","Type":"ContainerStarted","Data":"2f49e8587729bfff3ebd731c977ddf457805dd45883a9e80eec67cd178233688"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.796272 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" event={"ID":"b17f4af7-2215-4afd-810b-ae1f9a5ca41a","Type":"ContainerStarted","Data":"517689de5d33d982690d553bd3a06b73242552eb0aa412487516a946cddf9262"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.797070 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f90d4209-f294-478a-85fd-42c8e91bd6aa-config\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.800749 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" event={"ID":"680fb251-0f8f-4a72-a386-9ee555576980","Type":"ContainerStarted","Data":"fe22fdb29eb44050b6d6d6c77d786536958362f51c2d1a5fa62a03c61a2292d6"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.802091 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" event={"ID":"521d3dca-6ae7-48f6-a3bc-859493564f8d","Type":"ContainerStarted","Data":"cf5caa5398b1e7839be4ffcbed1d10007428320f2d4443073644cb46053a1690"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.802301 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.803478 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vzlcs" event={"ID":"1eae8258-0ffa-4aad-9ac4-747259f4cae0","Type":"ContainerStarted","Data":"42c35fd85dcd1c1c281550be9f0eb30ae9fb483772b0821bd2c33865171a5a0a"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.804357 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" event={"ID":"8880753d-187b-4b18-a292-43ed561b6d8d","Type":"ContainerStarted","Data":"8f354eef16292809d14e0e07c92f7c3bf27b0abd1f8fe3561675dfdcdbcd8ddc"} Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.827876 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-cgt27"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.828842 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.829759 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.329728142 +0000 UTC m=+147.864842031 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.839729 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wctv7\" (UniqueName: \"kubernetes.io/projected/f384f01c-b331-4051-aef7-8da3fbbad2ab-kube-api-access-wctv7\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.842987 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.850947 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-certs\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: W0121 15:26:45.855578 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod405cbe2f_c2e9_4d3a_9ed7_cf937eb2cead.slice/crio-30b6a6abc65cac7d2327943c5076908b3826fef1ecae72506f73bd65174b500b WatchSource:0}: Error finding container 30b6a6abc65cac7d2327943c5076908b3826fef1ecae72506f73bd65174b500b: Status 404 returned error can't find the container with id 30b6a6abc65cac7d2327943c5076908b3826fef1ecae72506f73bd65174b500b Jan 21 15:26:45 crc kubenswrapper[5021]: W0121 15:26:45.858700 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda62ba26d_f037_478b_8dd1_47ffb968b8a6.slice/crio-9606b9cdc1fa3cf0146e26e33f9a34586743cdd0078eefb16bfa2327b9dcb088 WatchSource:0}: Error finding container 9606b9cdc1fa3cf0146e26e33f9a34586743cdd0078eefb16bfa2327b9dcb088: Status 404 returned error can't find the container with id 9606b9cdc1fa3cf0146e26e33f9a34586743cdd0078eefb16bfa2327b9dcb088 Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.861770 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.885035 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.887440 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-cvs5m"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.889556 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.892513 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ad524c85-51c3-47c1-b649-eca2cacdf4a6-node-bootstrap-token\") pod \"machine-config-server-hkbvp\" 
(UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.894375 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-g8wp8"] Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.899079 5021 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.899155 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls podName:f384f01c-b331-4051-aef7-8da3fbbad2ab nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.399136738 +0000 UTC m=+147.934250627 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls") pod "dns-default-vpddf" (UID: "f384f01c-b331-4051-aef7-8da3fbbad2ab") : failed to sync secret cache: timed out waiting for the condition Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.899075 5021 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.899331 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume podName:f384f01c-b331-4051-aef7-8da3fbbad2ab nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.399297253 +0000 UTC m=+147.934411292 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume") pod "dns-default-vpddf" (UID: "f384f01c-b331-4051-aef7-8da3fbbad2ab") : failed to sync configmap cache: timed out waiting for the condition Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.903695 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.921023 5021 request.go:700] Waited for 1.012278443s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.923187 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.931041 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:45 crc kubenswrapper[5021]: E0121 15:26:45.931582 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.431568127 +0000 UTC m=+147.966682016 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.944030 5021 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.961863 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.973133 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:26:45 crc kubenswrapper[5021]: I0121 15:26:45.982437 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:45.999757 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bw5fp"] Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.004279 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.017467 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h"] Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.024683 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.027105 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l"] Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.032063 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.032383 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.532327791 +0000 UTC m=+148.067441680 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.033064 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.033517 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.533499904 +0000 UTC m=+148.068613783 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: W0121 15:26:46.036561 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4b323b2_0188_4e04_ab45_bb9689a750a2.slice/crio-9161386e90c503d545158086df6416a26900a7b5a48279c642d77dda68eb4382 WatchSource:0}: Error finding container 9161386e90c503d545158086df6416a26900a7b5a48279c642d77dda68eb4382: Status 404 returned error can't find the container with id 9161386e90c503d545158086df6416a26900a7b5a48279c642d77dda68eb4382 Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.043216 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 15:26:46 crc kubenswrapper[5021]: W0121 15:26:46.043300 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd285a21e_c631_453d_9f22_cbaa397f714a.slice/crio-6ee57c06c18c499975a17e5324fa6fa97b0ee641a7c9195d8a2e927152b31ded WatchSource:0}: Error finding container 6ee57c06c18c499975a17e5324fa6fa97b0ee641a7c9195d8a2e927152b31ded: Status 404 returned error can't find the container with id 6ee57c06c18c499975a17e5324fa6fa97b0ee641a7c9195d8a2e927152b31ded Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.062862 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.072893 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-cert\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s" Jan 21 15:26:46 crc 
kubenswrapper[5021]: I0121 15:26:46.099955 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj5gj\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.119615 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.134336 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.134528 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.634498005 +0000 UTC m=+148.169611904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.135030 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.135665 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.635641678 +0000 UTC m=+148.170755567 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.164324 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b7r7\" (UniqueName: \"kubernetes.io/projected/cdd0f7b7-d192-425e-87ad-0ec36f050bd7-kube-api-access-5b7r7\") pod \"etcd-operator-b45778765-6qfmg\" (UID: \"cdd0f7b7-d192-425e-87ad-0ec36f050bd7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.185019 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jtxp\" (UniqueName: \"kubernetes.io/projected/f0a0868f-a7c7-4bce-a9b5-855a11e2631e-kube-api-access-8jtxp\") pod \"csi-hostpathplugin-pkmbf\" (UID: \"f0a0868f-a7c7-4bce-a9b5-855a11e2631e\") " pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.187224 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.188661 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.200523 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjdt5\" (UniqueName: \"kubernetes.io/projected/8dbbe964-1a4c-456b-aa6c-e8473ef20bfc-kube-api-access-gjdt5\") pod \"service-ca-9c57cc56f-skc42\" (UID: \"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc\") " pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.220035 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbrn8\" (UniqueName: \"kubernetes.io/projected/da0f51c3-e6b4-49ae-8286-500e3ff30211-kube-api-access-bbrn8\") pod \"migrator-59844c95c7-hd2pm\" (UID: \"da0f51c3-e6b4-49ae-8286-500e3ff30211\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.235883 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.236092 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.736065811 +0000 UTC m=+148.271179700 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.236415 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.237150 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.737111281 +0000 UTC m=+148.272225170 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.242040 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzrjh\" (UniqueName: \"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-kube-api-access-rzrjh\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.258533 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c11241f4-6831-47d6-b5de-e8da9ccf7cad-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-58trd\" (UID: \"c11241f4-6831-47d6-b5de-e8da9ccf7cad\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.263432 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-skc42" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.263437 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.273653 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.280697 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4c8x2\" (UniqueName: \"kubernetes.io/projected/1530383e-f6f4-47de-8302-dfe172a883e7-kube-api-access-4c8x2\") pod \"control-plane-machine-set-operator-78cbb6b69f-gglqm\" (UID: \"1530383e-f6f4-47de-8302-dfe172a883e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.303264 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ftnh\" (UniqueName: \"kubernetes.io/projected/13da1899-cf91-483d-99ea-7c6aa673e4f7-kube-api-access-9ftnh\") pod \"kube-storage-version-migrator-operator-b67b599dd-hwzmx\" (UID: \"13da1899-cf91-483d-99ea-7c6aa673e4f7\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.316702 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mqdr\" (UniqueName: \"kubernetes.io/projected/c490e95d-e462-45b2-8352-9603283319e1-kube-api-access-5mqdr\") pod \"router-default-5444994796-4459k\" (UID: \"c490e95d-e462-45b2-8352-9603283319e1\") " pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.325684 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.337361 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.337548 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.837515866 +0000 UTC m=+148.372629755 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.337756 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.338130 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.838118272 +0000 UTC m=+148.373232171 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.338810 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg6z6\" (UniqueName: \"kubernetes.io/projected/8ca0f13c-b009-400f-94cd-f50d9209eb6c-kube-api-access-rg6z6\") pod \"package-server-manager-789f6589d5-7t8pz\" (UID: \"8ca0f13c-b009-400f-94cd-f50d9209eb6c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.356827 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.376139 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.379111 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76kmh\" (UniqueName: \"kubernetes.io/projected/0654cecd-38e1-4678-9452-5e8b8b1dd07f-kube-api-access-76kmh\") pod \"multus-admission-controller-857f4d67dd-gm5rm\" (UID: \"0654cecd-38e1-4678-9452-5e8b8b1dd07f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.393363 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.398252 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tt9x\" (UniqueName: \"kubernetes.io/projected/256b478f-1967-4699-857d-bc41f69654f2-kube-api-access-2tt9x\") pod \"machine-config-operator-74547568cd-m6vx8\" (UID: \"256b478f-1967-4699-857d-bc41f69654f2\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.404716 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfcpf\" (UniqueName: \"kubernetes.io/projected/d7b4ba4d-dc1d-4720-81f3-57059d529def-kube-api-access-kfcpf\") pod \"catalog-operator-68c6474976-phhx7\" (UID: \"d7b4ba4d-dc1d-4720-81f3-57059d529def\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.418040 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4d5503fc-0527-456d-b97d-7a455bdf3e7f-bound-sa-token\") pod \"ingress-operator-5b745b69d9-64lkf\" (UID: \"4d5503fc-0527-456d-b97d-7a455bdf3e7f\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.438869 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.439013 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.938986079 +0000 UTC m=+148.474099968 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.439362 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.439557 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.439730 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf" Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.439798 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:46.939771442 +0000 UTC m=+148.474885331 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.439803 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtkpx\" (UniqueName: \"kubernetes.io/projected/f90d4209-f294-478a-85fd-42c8e91bd6aa-kube-api-access-xtkpx\") pod \"service-ca-operator-777779d784-jm4qh\" (UID: \"f90d4209-f294-478a-85fd-42c8e91bd6aa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.459791 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6ktr\" (UniqueName: \"kubernetes.io/projected/8a5b34e0-9ec6-42cb-902c-0c8336b514ff-kube-api-access-f6ktr\") pod \"olm-operator-6b444d44fb-9fhn9\" (UID: \"8a5b34e0-9ec6-42cb-902c-0c8336b514ff\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.472465 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.479824 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-278gw\" (UniqueName: \"kubernetes.io/projected/ad524c85-51c3-47c1-b649-eca2cacdf4a6-kube-api-access-278gw\") pod \"machine-config-server-hkbvp\" (UID: \"ad524c85-51c3-47c1-b649-eca2cacdf4a6\") " pod="openshift-machine-config-operator/machine-config-server-hkbvp" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.498886 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7146e72d-adb4-4283-aad0-8ed4b6363be9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-kmdn7\" (UID: \"7146e72d-adb4-4283-aad0-8ed4b6363be9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.504076 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.516625 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvnp4\" (UniqueName: \"kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4\") pod \"marketplace-operator-79b997595-m2brn\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.537549 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b265d32-a4f3-4a09-931e-6f6ac0b82c1c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-p5qnh\" (UID: \"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.540601 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.541131 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.041114982 +0000 UTC m=+148.576228871 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.557613 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v4mq\" (UniqueName: \"kubernetes.io/projected/af171da1-a5e5-4811-8621-3acb113bb571-kube-api-access-4v4mq\") pod \"openshift-controller-manager-operator-756b6f6bc6-k8lpr\" (UID: \"af171da1-a5e5-4811-8621-3acb113bb571\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.562763 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.564170 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.571740 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.577997 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.582332 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bdw9\" (UniqueName: \"kubernetes.io/projected/8fb86530-c6d5-4e68-960b-2eadac1c8973-kube-api-access-9bdw9\") pod \"packageserver-d55dfcdfc-w8nsc\" (UID: \"8fb86530-c6d5-4e68-960b-2eadac1c8973\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.597638 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.600310 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slw58\" (UniqueName: \"kubernetes.io/projected/d5642370-ee34-4ee6-8ae8-0951768da987-kube-api-access-slw58\") pod \"machine-config-controller-84d6567774-ntg54\" (UID: \"d5642370-ee34-4ee6-8ae8-0951768da987\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.619610 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.619954 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pphp\" (UniqueName: \"kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp\") pod \"collect-profiles-29483475-7s6jf\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.628408 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.640891 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z62q\" (UniqueName: \"kubernetes.io/projected/7714bc37-dbe3-45b4-8bb2-84e9d9d05779-kube-api-access-7z62q\") pod \"ingress-canary-xq97s\" (UID: \"7714bc37-dbe3-45b4-8bb2-84e9d9d05779\") " pod="openshift-ingress-canary/ingress-canary-xq97s"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.642642 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.642857 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.642934 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn"
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.643273 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.143216264 +0000 UTC m=+148.678330153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.650640 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f384f01c-b331-4051-aef7-8da3fbbad2ab-config-volume\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.667072 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.683115 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.684469 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.693962 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f384f01c-b331-4051-aef7-8da3fbbad2ab-metrics-tls\") pod \"dns-default-vpddf\" (UID: \"f384f01c-b331-4051-aef7-8da3fbbad2ab\") " pod="openshift-dns/dns-default-vpddf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.707595 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hkbvp"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.724020 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.733813 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-vpddf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.743743 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.744003 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.243975499 +0000 UTC m=+148.779089388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.745760 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.746513 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.24649713 +0000 UTC m=+148.781611029 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.792820 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.809180 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8f5da986a4de70fbf976ba9a30bac8f112280ed0874ba5078bec32453448df5e"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.810680 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b69ae0dbd5744e5985a2f1747e248b4a85cab8424687132e4b6c9b63ec128e4b"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.815332 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" event={"ID":"a62ba26d-f037-478b-8dd1-47ffb968b8a6","Type":"ContainerStarted","Data":"9606b9cdc1fa3cf0146e26e33f9a34586743cdd0078eefb16bfa2327b9dcb088"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.817939 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" event={"ID":"549f101f-6acf-41be-9263-57bb5902cbd6","Type":"ContainerStarted","Data":"82ec8d9dcec41bced3f61824df7872b3b1db374875e2a378fe229b1f5bbb6d8a"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.819202 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" event={"ID":"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead","Type":"ContainerStarted","Data":"30b6a6abc65cac7d2327943c5076908b3826fef1ecae72506f73bd65174b500b"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.820293 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-92qbd" event={"ID":"d4b323b2-0188-4e04-ab45-bb9689a750a2","Type":"ContainerStarted","Data":"9161386e90c503d545158086df6416a26900a7b5a48279c642d77dda68eb4382"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.822609 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.823810 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"62a66939b0440e86c955e18aca4f2f06aaa2a7caea4a1bd096d72205a9e428f2"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.824890 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" event={"ID":"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d","Type":"ContainerStarted","Data":"17e6e6f68d15cb5ef97c05bbd10f2296804005b3b2aa39b22e81be766e7f5239"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.826089 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" event={"ID":"e72ac95a-ad13-408c-b595-9e983c185119","Type":"ContainerStarted","Data":"056c84b485308f9e905b5086e061c0f391a5fd54309eae9e7df77fbae4202844"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.827325 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" event={"ID":"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b","Type":"ContainerStarted","Data":"b6246370d7dc0301d5bf179306df4f1d6ed297673bf8fa5045d2275424809158"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.828351 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" event={"ID":"d285a21e-c631-453d-9f22-cbaa397f714a","Type":"ContainerStarted","Data":"6ee57c06c18c499975a17e5324fa6fa97b0ee641a7c9195d8a2e927152b31ded"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.829272 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" event={"ID":"279af6f8-0575-4617-9391-8251ab3db6f9","Type":"ContainerStarted","Data":"7a722cf812987c64bd522da4bdc9c2b9c8dfa4d67863d8f33fb57b5775e52050"}
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.854782 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.855006 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.354978654 +0000 UTC m=+148.890092543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.857253 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.857681 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.357653598 +0000 UTC m=+148.892767647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.863490 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-xq97s"
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.958459 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.959277 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.458847215 +0000 UTC m=+148.993961104 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:46 crc kubenswrapper[5021]: I0121 15:26:46.960822 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:46 crc kubenswrapper[5021]: E0121 15:26:46.961452 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.461442849 +0000 UTC m=+148.996556738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.063822 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.064187 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.564171478 +0000 UTC m=+149.099285367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.166273 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.166757 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.666734033 +0000 UTC m=+149.201847922 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.249177 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qfmg"]
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.267686 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.268439 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.768417354 +0000 UTC m=+149.303531243 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.352272 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-skc42"]
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.376895 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.377257 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.877235817 +0000 UTC m=+149.412349706 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.486249 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.487110 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:47.987091238 +0000 UTC m=+149.522205127 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.589004 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.589849 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.089834548 +0000 UTC m=+149.624948437 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.690961 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.691338 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.191317653 +0000 UTC m=+149.726431542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.713065 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gm5rm"]
Jan 21 15:26:47 crc kubenswrapper[5021]: W0121 15:26:47.717522 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dbbe964_1a4c_456b_aa6c_e8473ef20bfc.slice/crio-3c666213b16141f78086fd1df35d10571795cdb187e4a23addc63a56dcdb5937 WatchSource:0}: Error finding container 3c666213b16141f78086fd1df35d10571795cdb187e4a23addc63a56dcdb5937: Status 404 returned error can't find the container with id 3c666213b16141f78086fd1df35d10571795cdb187e4a23addc63a56dcdb5937
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.725754 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx"]
Jan 21 15:26:47 crc kubenswrapper[5021]: W0121 15:26:47.728233 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad524c85_51c3_47c1_b649_eca2cacdf4a6.slice/crio-73aed94d593fd038b5ef40073aa42f8a2d3d65ba0fcc95c2b1f341ecba8382b1 WatchSource:0}: Error finding container 73aed94d593fd038b5ef40073aa42f8a2d3d65ba0fcc95c2b1f341ecba8382b1: Status 404 returned error can't find the container with id 73aed94d593fd038b5ef40073aa42f8a2d3d65ba0fcc95c2b1f341ecba8382b1
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.792769 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.793237 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.293204209 +0000 UTC m=+149.828318098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.803536 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm"]
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.876003 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" event={"ID":"d8a36d28-fc2f-44fc-adca-fe218362ba3a","Type":"ContainerStarted","Data":"1b0103dd68dc60f7fa8f43787602f33bde3945bf28dbebe118119e928f9042c3"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.878510 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hkbvp" event={"ID":"ad524c85-51c3-47c1-b649-eca2cacdf4a6","Type":"ContainerStarted","Data":"73aed94d593fd038b5ef40073aa42f8a2d3d65ba0fcc95c2b1f341ecba8382b1"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.884811 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" event={"ID":"680fb251-0f8f-4a72-a386-9ee555576980","Type":"ContainerStarted","Data":"5633342b2cfb1063d8c8471af42be20e624615fa14d18df75d3c774d44b66328"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.888739 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-skc42" event={"ID":"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc","Type":"ContainerStarted","Data":"3c666213b16141f78086fd1df35d10571795cdb187e4a23addc63a56dcdb5937"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.889943 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" event={"ID":"b17f4af7-2215-4afd-810b-ae1f9a5ca41a","Type":"ContainerStarted","Data":"2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.890954 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" event={"ID":"a62ba26d-f037-478b-8dd1-47ffb968b8a6","Type":"ContainerStarted","Data":"fe2df07ccbe52317b9c17a1adf16516d221adcd14f914a0883a256f9d7f4bcbb"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.894040 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:47 crc kubenswrapper[5021]: E0121 15:26:47.894423 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.394389635 +0000 UTC m=+149.929503534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.895404 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" event={"ID":"8880753d-187b-4b18-a292-43ed561b6d8d","Type":"ContainerStarted","Data":"a4a437e53371815967692028a224f3ff3fe93c370165028b038b3d2ec4efb3f0"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.898555 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" event={"ID":"405cbe2f-c2e9-4d3a-9ed7-cf937eb2cead","Type":"ContainerStarted","Data":"2df9a16a6c9abad46c337f322e92fcb5fd495d95eae44fdfb94b5fdc2efb4fb8"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.901487 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" event={"ID":"521d3dca-6ae7-48f6-a3bc-859493564f8d","Type":"ContainerStarted","Data":"f3653583bafdb7164675e125f282c25acbd352f9800d43ca5dda8d72eb83fe76"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.903028 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" event={"ID":"8c09c820-79c0-4e63-b063-2f01381c96fd","Type":"ContainerStarted","Data":"44934bffe14224ee085ef595d818b4bfbae3dc586dac03566d52023d8a3f3806"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.913458 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" event={"ID":"cdd0f7b7-d192-425e-87ad-0ec36f050bd7","Type":"ContainerStarted","Data":"cc1ec1d3a94f008a25bb6c7917483504f0b788c1d43166917120fc9543438505"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.922608 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4459k" event={"ID":"c490e95d-e462-45b2-8352-9603283319e1","Type":"ContainerStarted","Data":"c979274b89cf7111986ce2c9c467de697534c4fc6f9ab7c0f59abbcfcd841565"}
Jan 21 15:26:47 crc kubenswrapper[5021]: I0121 15:26:47.924039 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vzlcs" event={"ID":"1eae8258-0ffa-4aad-9ac4-747259f4cae0","Type":"ContainerStarted","Data":"ec3814c246b082bbfe966f67c155b3bd4e7477c16f2a14fcdca8e20f5793eda4"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:47.995572 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:47.995883 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.495868119 +0000 UTC m=+150.030982008 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.096924 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.103747 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.603717013 +0000 UTC m=+150.138830952 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.200643 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.201423 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.701409521 +0000 UTC m=+150.236523400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.302642 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.303044 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.80302646 +0000 UTC m=+150.338140349 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.405553 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.405897 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:48.905883733 +0000 UTC m=+150.440997622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.509455 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.509847 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.009829417 +0000 UTC m=+150.544943306 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.565712 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"]
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.609645 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9"]
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.611324 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.611668 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.111657551 +0000 UTC m=+150.646771440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.621540 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd"]
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.623114 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"]
Jan 21 15:26:48 crc kubenswrapper[5021]: W0121 15:26:48.686517 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ca0f13c_b009_400f_94cd_f50d9209eb6c.slice/crio-4dc83e36f3555c5650de376eb781a32eec82304711cbbc14920a229bdb8c92f4 WatchSource:0}: Error finding container 4dc83e36f3555c5650de376eb781a32eec82304711cbbc14920a229bdb8c92f4: Status 404 returned error can't find the container with id 4dc83e36f3555c5650de376eb781a32eec82304711cbbc14920a229bdb8c92f4
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.713537 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.713722 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.213683432 +0000 UTC m=+150.748797321 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.713765 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.714257 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.214240567 +0000 UTC m=+150.749354456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.820418 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.820655 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.32062525 +0000 UTC m=+150.855739139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.821458 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.843891 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.343860719 +0000 UTC m=+150.878974608 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.897083 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8"]
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.922753 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.923097 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.423058763 +0000 UTC m=+150.958172652 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.923522 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:48 crc kubenswrapper[5021]: E0121 15:26:48.923846 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.423831304 +0000 UTC m=+150.958945193 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.929968 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" event={"ID":"8c09c820-79c0-4e63-b063-2f01381c96fd","Type":"ContainerStarted","Data":"9fdaa4eaff50dffb791ad69e10dcc415ce3802a0cdbfad52f62497de7d14ed33"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.932368 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"dd55be1ebfe9293dac92010a1cf111834a5fe8b7fddf2bf1c53161728968c462"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.935573 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" event={"ID":"0654cecd-38e1-4678-9452-5e8b8b1dd07f","Type":"ContainerStarted","Data":"82977b3a27f29e435291639b790f4e221849c84aa6914a5595e0dd37ca87f1e2"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.935606 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" event={"ID":"0654cecd-38e1-4678-9452-5e8b8b1dd07f","Type":"ContainerStarted","Data":"eed762bc3290eca3aea066f5654d801922de6c46cea356140a3bfb15a0cccb67"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.937480 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" event={"ID":"d285a21e-c631-453d-9f22-cbaa397f714a","Type":"ContainerStarted","Data":"143ec2320f7622dbb25f8146699e6731e0499b78f1f529dbaa6b01dc03a0b64d"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.939512 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" event={"ID":"8a5b34e0-9ec6-42cb-902c-0c8336b514ff","Type":"ContainerStarted","Data":"180725a6722804220402ea2030f9574a6c1c231d5dcfbc3fcb50158c12f3920a"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.947647 5021 generic.go:334] "Generic (PLEG): container finished" podID="d8a36d28-fc2f-44fc-adca-fe218362ba3a" containerID="1b0103dd68dc60f7fa8f43787602f33bde3945bf28dbebe118119e928f9042c3" exitCode=0
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.948206 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" event={"ID":"d8a36d28-fc2f-44fc-adca-fe218362ba3a","Type":"ContainerDied","Data":"1b0103dd68dc60f7fa8f43787602f33bde3945bf28dbebe118119e928f9042c3"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.952528 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hkbvp" event={"ID":"ad524c85-51c3-47c1-b649-eca2cacdf4a6","Type":"ContainerStarted","Data":"c0b44e2ff3c4b6ccea51fcbdef101eba27ffe40df72b261c8d55442c4b5b3246"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.953888 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" event={"ID":"c11241f4-6831-47d6-b5de-e8da9ccf7cad","Type":"ContainerStarted","Data":"5314d36474de33bfd7081fb503a4b5c02ecde64c7a31f4beb12b7193e7bee67e"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.955150 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" event={"ID":"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d","Type":"ContainerStarted","Data":"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.955669 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp"
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.958708 5021 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-bw5fp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection refused" start-of-body=
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.958776 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection refused"
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.960050 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" event={"ID":"8ca0f13c-b009-400f-94cd-f50d9209eb6c","Type":"ContainerStarted","Data":"4dc83e36f3555c5650de376eb781a32eec82304711cbbc14920a229bdb8c92f4"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.961711 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f67d5455c0adf3b3a86ed5d40bdd8bf114e8781f0ec930fcfdba377e566fea35"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.964125 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" event={"ID":"279af6f8-0575-4617-9391-8251ab3db6f9","Type":"ContainerStarted","Data":"be1d3645628b201723fe8d262c88f615aa779c0829f726099f5872a33aaa4eeb"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.965609 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" event={"ID":"71337f59-d4e5-47da-9d8e-759bd17cfdc3","Type":"ContainerStarted","Data":"3cd7b7671acfb344981d740d2faad191af2766683e3769f26297e1db92a5246a"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.967809 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" event={"ID":"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b","Type":"ContainerStarted","Data":"07d4f03e69a01224863044d3ed2e7b975509400a4063804dcd0db23f55ee56b5"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.969899 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-92qbd" event={"ID":"d4b323b2-0188-4e04-ab45-bb9689a750a2","Type":"ContainerStarted","Data":"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.972893 5021 generic.go:334] "Generic (PLEG): container finished" podID="e72ac95a-ad13-408c-b595-9e983c185119" containerID="a4166f6861bd3b19d18241cdbc6bd18d47ba7b0067d528abe9c7417afc61df56" exitCode=0
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.973083 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" event={"ID":"e72ac95a-ad13-408c-b595-9e983c185119","Type":"ContainerDied","Data":"a4166f6861bd3b19d18241cdbc6bd18d47ba7b0067d528abe9c7417afc61df56"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.979796 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4459k" event={"ID":"c490e95d-e462-45b2-8352-9603283319e1","Type":"ContainerStarted","Data":"75a173ad5964d115acfffe0d0657c3312e7553d3ab3aa76f0dc2b7040e963937"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.982574 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" event={"ID":"cdd0f7b7-d192-425e-87ad-0ec36f050bd7","Type":"ContainerStarted","Data":"75c369cdf23b88ecedaef1ca36f2e370e614bf0b0d542848231ea31d9fb8bd0c"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.985005 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" event={"ID":"13da1899-cf91-483d-99ea-7c6aa673e4f7","Type":"ContainerStarted","Data":"a38ec8bd0eb8b7611ced89ec5d75686faabe9cf44ab58cfb23cee56d4f1d79fa"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.985040 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" event={"ID":"13da1899-cf91-483d-99ea-7c6aa673e4f7","Type":"ContainerStarted","Data":"b4f9d2a48b7dd691ff34417c6b8e8a2ab8a305abb25b14fe27cf909bd9597a1e"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.987359 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" event={"ID":"680fb251-0f8f-4a72-a386-9ee555576980","Type":"ContainerStarted","Data":"3bf9b9523ac25d53e93c50f8a6a29d2993f74b78d6db15177da20eaa6dc4b507"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.991760 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-skc42" event={"ID":"8dbbe964-1a4c-456b-aa6c-e8473ef20bfc","Type":"ContainerStarted","Data":"39206f5687debad45fb3987462865a40c9a8ca879a6c5153479a9b27744fec59"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.995196 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" event={"ID":"da0f51c3-e6b4-49ae-8286-500e3ff30211","Type":"ContainerStarted","Data":"1a72108d05a9e525b6779cb8de592c0b169a648fc428c37d2d985d9dd90c4165"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.995285 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" event={"ID":"da0f51c3-e6b4-49ae-8286-500e3ff30211","Type":"ContainerStarted","Data":"59db597710639310eb2637fd685b2a48d8618d6bc8321f8e8e727b2792381306"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.998445 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"6b4103dc9814ef6b74dcc0489ac802e1a7534e620ef1ba63d554be41f8e42c3e"}
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.998481 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zf7sb"
Jan 21 15:26:48 crc kubenswrapper[5021]: I0121 15:26:48.998497 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.001877 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.001935 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vzlcs"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.002097 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.003658 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.003715 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.005936 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.010679 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c"
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.024989 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.025235 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.525201406 +0000 UTC m=+151.060315295 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.025747 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.028644 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.528626693 +0000 UTC m=+151.063740682 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.127562 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.129649 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.629622753 +0000 UTC m=+151.164736782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.231125 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.231796 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.731763086 +0000 UTC m=+151.266877015 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:49 crc kubenswrapper[5021]: W0121 15:26:49.308815 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod256b478f_1967_4699_857d_bc41f69654f2.slice/crio-d4c6aca4319ae6da3e599355b466fd42b5c6ff74eb57976758e044c4b0f27646 WatchSource:0}: Error finding container d4c6aca4319ae6da3e599355b466fd42b5c6ff74eb57976758e044c4b0f27646: Status 404 returned error can't find the container with id d4c6aca4319ae6da3e599355b466fd42b5c6ff74eb57976758e044c4b0f27646
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.330997 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr"]
Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.332429 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.332760 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.832715856 +0000 UTC m=+151.367829765 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.333062 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.337348 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.837321776 +0000 UTC m=+151.372435675 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.340348 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-vpddf"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.351106 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.354540 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-xq97s"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.362050 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.369024 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.372159 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.374225 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.376026 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.379824 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.383251 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.385118 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkmbf"] Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.386779 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh"] Jan 21 15:26:49 crc kubenswrapper[5021]: W0121 15:26:49.388181 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf171da1_a5e5_4811_8621_3acb113bb571.slice/crio-74a54ccf4eac09ced09e3f6b3ee1dc17b079faf31d651b0f849efcf0c3e29857 WatchSource:0}: Error finding container 74a54ccf4eac09ced09e3f6b3ee1dc17b079faf31d651b0f849efcf0c3e29857: Status 404 returned error can't find the container with id 74a54ccf4eac09ced09e3f6b3ee1dc17b079faf31d651b0f849efcf0c3e29857 Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.434109 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.434943 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:49.93488883 +0000 UTC m=+151.470002719 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: W0121 15:26:49.451725 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7146e72d_adb4_4283_aad0_8ed4b6363be9.slice/crio-61ec1ca7840fc53696475858ee3f0a1182d0cd7f45d6df1b069c44e368a13d7a WatchSource:0}: Error finding container 61ec1ca7840fc53696475858ee3f0a1182d0cd7f45d6df1b069c44e368a13d7a: Status 404 returned error can't find the container with id 61ec1ca7840fc53696475858ee3f0a1182d0cd7f45d6df1b069c44e368a13d7a Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.480548 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.540067 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.542054 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.042018825 +0000 UTC m=+151.577132714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.649370 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.649780 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.149761007 +0000 UTC m=+151.684874906 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.751621 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.753983 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.253957968 +0000 UTC m=+151.789071857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.867755 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.868234 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.368216665 +0000 UTC m=+151.903330544 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.897478 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" podStartSLOduration=126.897448752 podStartE2EDuration="2m6.897448752s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:49.841385824 +0000 UTC m=+151.376499723" watchObservedRunningTime="2026-01-21 15:26:49.897448752 +0000 UTC m=+151.432562641" Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.939765 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-khdzj" podStartSLOduration=127.9397363 podStartE2EDuration="2m7.9397363s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:49.939005649 +0000 UTC m=+151.474119538" watchObservedRunningTime="2026-01-21 15:26:49.9397363 +0000 UTC m=+151.474850190" Jan 21 15:26:49 crc kubenswrapper[5021]: I0121 15:26:49.969136 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:49 crc kubenswrapper[5021]: E0121 15:26:49.969500 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.469488193 +0000 UTC m=+152.004602082 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.019067 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" event={"ID":"602fe93d-2ebf-458c-8dfb-2cd9ba0ce57b","Type":"ContainerStarted","Data":"b5961e3781c782deec7824905f78d772462ab79bbed04ceed11291d4833bc46a"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.030458 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-lp8vp" podStartSLOduration=128.030433579 podStartE2EDuration="2m8.030433579s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.029670558 +0000 UTC m=+151.564784447" watchObservedRunningTime="2026-01-21 15:26:50.030433579 +0000 UTC m=+151.565547468" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.034013 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vpddf" event={"ID":"f384f01c-b331-4051-aef7-8da3fbbad2ab","Type":"ContainerStarted","Data":"146214f1aa120c88a5905c61ec50ef2362794ad4b0008fe2deabf1f191c742d0"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.071457 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.071788 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.57175333 +0000 UTC m=+152.106867219 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.074177 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.075364 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.575347012 +0000 UTC m=+152.110460901 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.082353 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" podStartSLOduration=128.08232346 podStartE2EDuration="2m8.08232346s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.072259334 +0000 UTC m=+151.607373223" watchObservedRunningTime="2026-01-21 15:26:50.08232346 +0000 UTC m=+151.617437349" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.136212 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" event={"ID":"c0332d4a-fb12-4d96-ae36-bb7295b28a87","Type":"ContainerStarted","Data":"dfc90cf9fe0e951eb33f588aae6a30d193f048896a20f1fbb8cfa92c2eb2df3d"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.163052 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" event={"ID":"a62ba26d-f037-478b-8dd1-47ffb968b8a6","Type":"ContainerStarted","Data":"0aba77d37854c564f72cb901ee6af421e20d06bb259a1dd01e06c9291b8854f9"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.177731 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.178228 5021 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.678203015 +0000 UTC m=+152.213316904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.201654 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-xq97s" event={"ID":"7714bc37-dbe3-45b4-8bb2-84e9d9d05779","Type":"ContainerStarted","Data":"e2a62b1c51c5d6e935731e8712f81e8f5f379e2ce0568c7e25cc2b84ffe4727f"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.207801 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" event={"ID":"af171da1-a5e5-4811-8621-3acb113bb571","Type":"ContainerStarted","Data":"74a54ccf4eac09ced09e3f6b3ee1dc17b079faf31d651b0f849efcf0c3e29857"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.217029 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" event={"ID":"d7b4ba4d-dc1d-4720-81f3-57059d529def","Type":"ContainerStarted","Data":"08b87a30b9ee7a38ace3279a0c3b3cd4daa559fc38affc549afc386e440d11e0"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.220406 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zf7sb" podStartSLOduration=128.22039147 podStartE2EDuration="2m8.22039147s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.171721471 +0000 UTC m=+151.706835370" watchObservedRunningTime="2026-01-21 15:26:50.22039147 +0000 UTC m=+151.755505369" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.221584 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-zld8h" podStartSLOduration=128.221576734 podStartE2EDuration="2m8.221576734s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.220963737 +0000 UTC m=+151.756077626" watchObservedRunningTime="2026-01-21 15:26:50.221576734 +0000 UTC m=+151.756690623" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.240808 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" event={"ID":"7146e72d-adb4-4283-aad0-8ed4b6363be9","Type":"ContainerStarted","Data":"61ec1ca7840fc53696475858ee3f0a1182d0cd7f45d6df1b069c44e368a13d7a"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.245134 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" 
event={"ID":"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c","Type":"ContainerStarted","Data":"2ccbce2911fb43b7027da22cc75620a1f7681a066e8d6855241a48445b8c0b59"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.263863 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-92qbd" podStartSLOduration=128.263846211 podStartE2EDuration="2m8.263846211s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.262436471 +0000 UTC m=+151.797550370" watchObservedRunningTime="2026-01-21 15:26:50.263846211 +0000 UTC m=+151.798960110" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.285879 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.287336 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.787319746 +0000 UTC m=+152.322433635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.297884 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" event={"ID":"256b478f-1967-4699-857d-bc41f69654f2","Type":"ContainerStarted","Data":"d4c6aca4319ae6da3e599355b466fd42b5c6ff74eb57976758e044c4b0f27646"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.311942 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" event={"ID":"f90d4209-f294-478a-85fd-42c8e91bd6aa","Type":"ContainerStarted","Data":"6ff14506d83986effa78fd7e1032656ff12bf01b2d0a168b2007dcebf32bf44e"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.324355 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" event={"ID":"4d5503fc-0527-456d-b97d-7a455bdf3e7f","Type":"ContainerStarted","Data":"fe56acaa12151de3594a9dfd6ad6783a0d3ce22c496e1feb9b47ee767c05721b"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.326037 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" event={"ID":"1530383e-f6f4-47de-8302-dfe172a883e7","Type":"ContainerStarted","Data":"2a61550c3cd358937d2df3de151ea18221b2a6e6a382f21b6b36c8d07c4a338a"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.345155 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-console/downloads-7954f5f757-vzlcs" podStartSLOduration=128.345137643 podStartE2EDuration="2m8.345137643s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.341439149 +0000 UTC m=+151.876553038" watchObservedRunningTime="2026-01-21 15:26:50.345137643 +0000 UTC m=+151.880251532" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.368933 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" podStartSLOduration=128.368891436 podStartE2EDuration="2m8.368891436s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.368547097 +0000 UTC m=+151.903661006" watchObservedRunningTime="2026-01-21 15:26:50.368891436 +0000 UTC m=+151.904005335" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.383675 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" event={"ID":"d5642370-ee34-4ee6-8ae8-0951768da987","Type":"ContainerStarted","Data":"09993d58a6feb4b1b5cb463a048c2902d3fa367426f323c55e1719ed4b8db348"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.392412 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.392855 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.892839914 +0000 UTC m=+152.427953803 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.404974 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" event={"ID":"f0a0868f-a7c7-4bce-a9b5-855a11e2631e","Type":"ContainerStarted","Data":"3dfad55ff31be675d8127ec3d9e381f71110b5cf1724882d11c5c6abbb4b88cb"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.406819 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-cvs5m" podStartSLOduration=128.40679684 podStartE2EDuration="2m8.40679684s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.405315968 +0000 UTC m=+151.940429857" watchObservedRunningTime="2026-01-21 15:26:50.40679684 +0000 UTC m=+151.941910729" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.414635 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" event={"ID":"8fb86530-c6d5-4e68-960b-2eadac1c8973","Type":"ContainerStarted","Data":"22db00d6730e7cff8708353fd40c08723eca466e3a6c5e4f736754f5dfc668d3"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.440731 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-cgt27" podStartSLOduration=127.44071082 podStartE2EDuration="2m7.44071082s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.438895579 +0000 UTC m=+151.974009458" watchObservedRunningTime="2026-01-21 15:26:50.44071082 +0000 UTC m=+151.975824719" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.442148 5021 generic.go:334] "Generic (PLEG): container finished" podID="549f101f-6acf-41be-9263-57bb5902cbd6" containerID="774e53c568db2da545b4f86a97b8396d6367e8f8f94dacbd8bc436a843764799" exitCode=0 Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.442249 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" event={"ID":"549f101f-6acf-41be-9263-57bb5902cbd6","Type":"ContainerDied","Data":"774e53c568db2da545b4f86a97b8396d6367e8f8f94dacbd8bc436a843764799"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.487238 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" event={"ID":"8ca0f13c-b009-400f-94cd-f50d9209eb6c","Type":"ContainerStarted","Data":"2ce35264cf7d58a173a85ea3cdf2789a47d70fc0a081132bbdf9c77cfddfb6e5"} Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.488647 5021 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-bw5fp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection 
refused" start-of-body= Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.489392 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection refused" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.488693 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.489780 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.493787 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.494271 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:50.994251657 +0000 UTC m=+152.529365546 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.565569 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-hkbvp" podStartSLOduration=7.565552377 podStartE2EDuration="7.565552377s" podCreationTimestamp="2026-01-21 15:26:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.564285431 +0000 UTC m=+152.099399320" watchObservedRunningTime="2026-01-21 15:26:50.565552377 +0000 UTC m=+152.100666266" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.582729 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.596457 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.600482 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.100458736 +0000 UTC m=+152.635572625 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.604258 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:50 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:50 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:50 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.604308 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.616342 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-6qfmg" podStartSLOduration=128.616315285 podStartE2EDuration="2m8.616315285s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.615042149 +0000 UTC m=+152.150156038" watchObservedRunningTime="2026-01-21 15:26:50.616315285 +0000 UTC m=+152.151429174" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.700166 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.700474 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.200461928 +0000 UTC m=+152.735575817 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.735111 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-4459k" podStartSLOduration=128.735088399 podStartE2EDuration="2m8.735088399s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.700692865 +0000 UTC m=+152.235806754" watchObservedRunningTime="2026-01-21 15:26:50.735088399 +0000 UTC m=+152.270202288" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.764633 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-skc42" podStartSLOduration=127.764613416 podStartE2EDuration="2m7.764613416s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.736375076 +0000 UTC m=+152.271488965" watchObservedRunningTime="2026-01-21 15:26:50.764613416 +0000 UTC m=+152.299727305" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.802707 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hwzmx" podStartSLOduration=127.802689824 podStartE2EDuration="2m7.802689824s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.76690315 +0000 UTC m=+152.302017039" watchObservedRunningTime="2026-01-21 15:26:50.802689824 +0000 UTC m=+152.337803713" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.804004 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4c5bh" podStartSLOduration=128.803994411 podStartE2EDuration="2m8.803994411s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:50.801375237 +0000 UTC m=+152.336489126" watchObservedRunningTime="2026-01-21 15:26:50.803994411 +0000 UTC m=+152.339108300" Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.803166 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.803226 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-21 15:26:51.303213428 +0000 UTC m=+152.838327317 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.805782 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.806354 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.306340668 +0000 UTC m=+152.841454557 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:50 crc kubenswrapper[5021]: I0121 15:26:50.907648 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:50 crc kubenswrapper[5021]: E0121 15:26:50.907976 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.407961026 +0000 UTC m=+152.943074915 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.008858 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.009378 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.509366308 +0000 UTC m=+153.044480187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.110783 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.111213 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.611192162 +0000 UTC m=+153.146306051 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.212034 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.212301 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.712289306 +0000 UTC m=+153.247403195 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.230772 5021 csr.go:261] certificate signing request csr-5bbtz is approved, waiting to be issued
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.240700 5021 csr.go:257] certificate signing request csr-5bbtz is issued
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.315066 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.315264 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.815240862 +0000 UTC m=+153.350354751 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.315441 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.315796 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.815784317 +0000 UTC m=+153.350898206 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.416216 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.416444 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.916419118 +0000 UTC m=+153.451533007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.416681 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.417064 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:51.917049576 +0000 UTC m=+153.452163465 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.520432 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.521270 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.021250697 +0000 UTC m=+153.556364586 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.593189 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:26:51 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:26:51 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:26:51 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.593236 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.609704 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" event={"ID":"e72ac95a-ad13-408c-b595-9e983c185119","Type":"ContainerStarted","Data":"564cbc55c68a1d4d3cac1f6bd375d521053827e565481306e6e35f945eca6278"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.624826 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.625204 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.125192001 +0000 UTC m=+153.660305890 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.627282 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" event={"ID":"f90d4209-f294-478a-85fd-42c8e91bd6aa","Type":"ContainerStarted","Data":"1a10a16ed33acfef906ace8777781a1538b0c7f51c9646efc8ccd48fac09b514"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.633464 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" event={"ID":"d7b4ba4d-dc1d-4720-81f3-57059d529def","Type":"ContainerStarted","Data":"9eaee1d0587ae9fe0ee35940acce5b3a247140d80ac91cda2986c76b0d045dc0"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.634413 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.648165 5021 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-phhx7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body=
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.648262 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" podUID="d7b4ba4d-dc1d-4720-81f3-57059d529def" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.675181 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" podStartSLOduration=128.675162667 podStartE2EDuration="2m8.675162667s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.67031985 +0000 UTC m=+153.205433749" watchObservedRunningTime="2026-01-21 15:26:51.675162667 +0000 UTC m=+153.210276556"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.681110 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" event={"ID":"d8a36d28-fc2f-44fc-adca-fe218362ba3a","Type":"ContainerStarted","Data":"98a8436b6f7430ed08e6ec8474f4f797ac5ebdcbfd78b2aedd6d080daeb98063"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.681224 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.701273 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" event={"ID":"71337f59-d4e5-47da-9d8e-759bd17cfdc3","Type":"ContainerStarted","Data":"a5bb51c915ed8831067aabe10257efffba87f9dc2079eb6e60ead96e294df68a"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.725753 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" event={"ID":"256b478f-1967-4699-857d-bc41f69654f2","Type":"ContainerStarted","Data":"c408205f23e4a22a8f8c0061f7a8d12bcd0358c8629b789a8eab7fc736d0a5cb"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.725821 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" event={"ID":"256b478f-1967-4699-857d-bc41f69654f2","Type":"ContainerStarted","Data":"5da23da25132028f94eb56dd645a8511d57ebbfd8d3d0bbcb4d5201893aeb5d8"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.727618 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.727765 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.227745147 +0000 UTC m=+153.762859036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.727871 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.729124 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.229108725 +0000 UTC m=+153.764222614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.735493 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" podStartSLOduration=129.735467865 podStartE2EDuration="2m9.735467865s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.734781396 +0000 UTC m=+153.269895305" watchObservedRunningTime="2026-01-21 15:26:51.735467865 +0000 UTC m=+153.270581754"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.737478 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" event={"ID":"549f101f-6acf-41be-9263-57bb5902cbd6","Type":"ContainerStarted","Data":"fcc1717fc95524acee9749fcadc28b9396fc588ddab30d7766c662d53e3ec297"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.739935 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" event={"ID":"c11241f4-6831-47d6-b5de-e8da9ccf7cad","Type":"ContainerStarted","Data":"a52ad0a40e305e56df4746e2e44f32b68a44444051ffcf2ae3d23105402aad6e"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.745058 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" event={"ID":"af171da1-a5e5-4811-8621-3acb113bb571","Type":"ContainerStarted","Data":"3ee466ac60f4dd4b8513dad85f6b2effe14bf812a98af7b53455a9e1673a6f79"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.767672 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" event={"ID":"c0332d4a-fb12-4d96-ae36-bb7295b28a87","Type":"ContainerStarted","Data":"587fa11fd5381b4c1315a7840db69e549090ef937cb1412e978f10377bff505f"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.768367 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.771608 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" event={"ID":"4d5503fc-0527-456d-b97d-7a455bdf3e7f","Type":"ContainerStarted","Data":"4da1501877cebad5c1c90b8960284432b1e6dd038061a8dc63bfe91d165c0756"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.779983 5021 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-m2brn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.780027 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.783780 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" event={"ID":"8a5b34e0-9ec6-42cb-902c-0c8336b514ff","Type":"ContainerStarted","Data":"bbb86a34e40a8f0e5b056a216b4402998722d2d3ef91241e910b9bc5a2f2be48"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.784397 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.793373 5021 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-9fhn9 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body=
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.793439 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" podUID="8a5b34e0-9ec6-42cb-902c-0c8336b514ff" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.818256 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" event={"ID":"3b265d32-a4f3-4a09-931e-6f6ac0b82c1c","Type":"ContainerStarted","Data":"c31d0c948696cc01bde5918ede5561026311e157df513363670f511d5ac884b5"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.819713 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" podStartSLOduration=128.819700031 podStartE2EDuration="2m8.819700031s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.819009792 +0000 UTC m=+153.354123681" watchObservedRunningTime="2026-01-21 15:26:51.819700031 +0000 UTC m=+153.354813920"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.829782 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.831417 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.331391193 +0000 UTC m=+153.866505082 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.844881 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.859943 5021 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-w8nsc container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" start-of-body=
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.859993 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" podUID="8fb86530-c6d5-4e68-960b-2eadac1c8973" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.875256 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" event={"ID":"8ca0f13c-b009-400f-94cd-f50d9209eb6c","Type":"ContainerStarted","Data":"efcac896ec0cbd4fd7ce756915bb7ab7a205066f456c53206df7cb663f8f7196"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.876028 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.918636 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" podStartSLOduration=128.918618183 podStartE2EDuration="2m8.918618183s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.878983801 +0000 UTC m=+153.414097690" watchObservedRunningTime="2026-01-21 15:26:51.918618183 +0000 UTC m=+153.453732072"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.933288 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:51 crc kubenswrapper[5021]: E0121 15:26:51.934740 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.43471721 +0000 UTC m=+153.969831099 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.973647 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" event={"ID":"0654cecd-38e1-4678-9452-5e8b8b1dd07f","Type":"ContainerStarted","Data":"8c783a577a0ce372b9b7f0b71895a391ad2d08dbc9ef0ae0604f99fc37167902"}
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.976446 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" podStartSLOduration=129.97642814 podStartE2EDuration="2m9.97642814s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.974420544 +0000 UTC m=+153.509534453" watchObservedRunningTime="2026-01-21 15:26:51.97642814 +0000 UTC m=+153.511542029"
Jan 21 15:26:51 crc kubenswrapper[5021]: I0121 15:26:51.977634 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-m6vx8" podStartSLOduration=128.977626874 podStartE2EDuration="2m8.977626874s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:51.919793386 +0000 UTC m=+153.454907275" watchObservedRunningTime="2026-01-21 15:26:51.977626874 +0000 UTC m=+153.512740763"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.027858 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" podStartSLOduration=129.027839337 podStartE2EDuration="2m9.027839337s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.025864361 +0000 UTC m=+153.560978250" watchObservedRunningTime="2026-01-21 15:26:52.027839337 +0000 UTC m=+153.562953226"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.030472 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" event={"ID":"da0f51c3-e6b4-49ae-8286-500e3ff30211","Type":"ContainerStarted","Data":"c4116a5e7ced2783d1f7a1eb9c529ae7044c08a17d1cc831f7bf4af40007f34f"}
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.036039 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.036477 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.536458721 +0000 UTC m=+154.071572610 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.141640 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.143575 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.643561085 +0000 UTC m=+154.178674974 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.173675 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" podStartSLOduration=129.173654537 podStartE2EDuration="2m9.173654537s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.173190054 +0000 UTC m=+153.708303963" watchObservedRunningTime="2026-01-21 15:26:52.173654537 +0000 UTC m=+153.708768426"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.174057 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-p5qnh" podStartSLOduration=129.174052408 podStartE2EDuration="2m9.174052408s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.091465388 +0000 UTC m=+153.626579277" watchObservedRunningTime="2026-01-21 15:26:52.174052408 +0000 UTC m=+153.709166297"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.243792 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-21 15:21:51 +0000 UTC, rotation deadline is 2026-12-03 06:08:56.193881449 +0000 UTC
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.243844 5021 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7574h42m3.950041294s for next certificate rotation
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.244375 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.244769 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.744752821 +0000 UTC m=+154.279866710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.303239 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-k8lpr" podStartSLOduration=130.303194737 podStartE2EDuration="2m10.303194737s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.270772948 +0000 UTC m=+153.805886837" watchObservedRunningTime="2026-01-21 15:26:52.303194737 +0000 UTC m=+153.838308626"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.306176 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz" podStartSLOduration=129.306165671 podStartE2EDuration="2m9.306165671s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.304824743 +0000 UTC m=+153.839938632" watchObservedRunningTime="2026-01-21 15:26:52.306165671 +0000 UTC m=+153.841279560"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.346891 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.347510 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.847493871 +0000 UTC m=+154.382607750 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.353523 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-xq97s" podStartSLOduration=9.353499652 podStartE2EDuration="9.353499652s" podCreationTimestamp="2026-01-21 15:26:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.352734019 +0000 UTC m=+153.887847908" watchObservedRunningTime="2026-01-21 15:26:52.353499652 +0000 UTC m=+153.888613541"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.399666 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" podStartSLOduration=129.399646139 podStartE2EDuration="2m9.399646139s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.394738189 +0000 UTC m=+153.929852078" watchObservedRunningTime="2026-01-21 15:26:52.399646139 +0000 UTC m=+153.934760028"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.434836 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-58trd" podStartSLOduration=129.434803334 podStartE2EDuration="2m9.434803334s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.432563221 +0000 UTC m=+153.967677110" watchObservedRunningTime="2026-01-21 15:26:52.434803334 +0000 UTC m=+153.969917223"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.452588 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.452844 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.952797194 +0000 UTC m=+154.487911083 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.452917 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.453558 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:52.953547105 +0000 UTC m=+154.488660994 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.492387 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" podStartSLOduration=130.492358255 podStartE2EDuration="2m10.492358255s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.475365993 +0000 UTC m=+154.010479902" watchObservedRunningTime="2026-01-21 15:26:52.492358255 +0000 UTC m=+154.027472144"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.547142 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" podStartSLOduration=129.547118416 podStartE2EDuration="2m9.547118416s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.540842247 +0000 UTC m=+154.075956136" watchObservedRunningTime="2026-01-21 15:26:52.547118416 +0000 UTC m=+154.082232305"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.554523 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.554886 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.054866235 +0000 UTC m=+154.589980124 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.592263 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:26:52 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:26:52 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:26:52 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.592704 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.649028 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hd2pm" podStartSLOduration=129.649010972 podStartE2EDuration="2m9.649010972s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.647952052 +0000 UTC m=+154.183065961" watchObservedRunningTime="2026-01-21 15:26:52.649010972 +0000 UTC m=+154.184124861"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.651003 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-gm5rm" podStartSLOduration=129.650996668 podStartE2EDuration="2m9.650996668s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:52.583187077 +0000 UTC m=+154.118300966" watchObservedRunningTime="2026-01-21 15:26:52.650996668 +0000 UTC m=+154.186110557"
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.656600 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
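Each nestedpendingoperations.go:348 entry above stamps the failed volume operation with a "No retries permitted until" deadline 500ms out; the reconciler loop re-enters far more often (roughly every 100ms here), but the gate holds the visible retry cadence to that backoff. A minimal sketch of this requeue-with-backoff pattern follows; it is an illustration of the observed behavior under that reading, not kubelet source, and every name in it is invented for the example.

// pendingop-sketch: gate an operation behind a "no retries until" deadline.
package main

import (
	"errors"
	"fmt"
	"time"
)

type pendingOp struct {
	retryAfter time.Time     // "No retries permitted until ..."
	backoff    time.Duration // durationBeforeRetry
}

// tryRun skips the operation while inside the backoff window; on failure it
// pushes the deadline out again, mirroring the log lines above.
func (p *pendingOp) tryRun(now time.Time, run func() error) {
	if now.Before(p.retryAfter) {
		return
	}
	if err := run(); err != nil {
		p.retryAfter = now.Add(p.backoff)
		fmt.Printf("failed. No retries permitted until %s (durationBeforeRetry %s). Error: %v\n",
			p.retryAfter.Format(time.RFC3339Nano), p.backoff, err)
	}
}

func main() {
	op := &pendingOp{backoff: 500 * time.Millisecond}
	mount := func() error {
		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
	}
	// Re-enter every 100ms; the gate limits actual attempts to ~2 per second.
	for i := 0; i < 10; i++ {
		op.tryRun(time.Now(), mount)
		time.Sleep(100 * time.Millisecond)
	}
}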
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.657050 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.157034378 +0000 UTC m=+154.692148267 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.758328 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.758504 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.258463072 +0000 UTC m=+154.793576961 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.758817 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.759133 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.25912086 +0000 UTC m=+154.794234749 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.859849 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.860270 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.360250436 +0000 UTC m=+154.895364325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:52 crc kubenswrapper[5021]: I0121 15:26:52.962146 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:52 crc kubenswrapper[5021]: E0121 15:26:52.962601 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.462576874 +0000 UTC m=+154.997690823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.045037 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vpddf" event={"ID":"f384f01c-b331-4051-aef7-8da3fbbad2ab","Type":"ContainerStarted","Data":"f1c87aa8ebbdd185701d328b55d12e3beb22c124ba6894bffc21b2227885619f"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.045087 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-vpddf" event={"ID":"f384f01c-b331-4051-aef7-8da3fbbad2ab","Type":"ContainerStarted","Data":"2c5233c25ea87a0723fe1f53210d362553dba30f824c04b53db8a46457c7495a"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.048809 5021 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-kn7v9 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.048826 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" event={"ID":"e72ac95a-ad13-408c-b595-9e983c185119","Type":"ContainerStarted","Data":"6e8aa0ab8533d314fb4ea20047ea89d3b92c8921447c61dab40e276c843b73ba"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.048871 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" podUID="d8a36d28-fc2f-44fc-adca-fe218362ba3a" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused"
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.049366 5021 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-kn7v9 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.049400 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" podUID="d8a36d28-fc2f-44fc-adca-fe218362ba3a" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused"
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.054959 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xcc8f" event={"ID":"8c09c820-79c0-4e63-b063-2f01381c96fd","Type":"ContainerStarted","Data":"e787c1f83205457b4f66b3d5bc996f7c0317e15cb03760b297f9105526906585"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.056882 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" event={"ID":"f0a0868f-a7c7-4bce-a9b5-855a11e2631e","Type":"ContainerStarted","Data":"29c5e0cc035715ea3804681113308eb654566a9dee9ed991f775d55b8967df75"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.062576 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-xq97s" event={"ID":"7714bc37-dbe3-45b4-8bb2-84e9d9d05779","Type":"ContainerStarted","Data":"38c85f1d3c26afc92563f276193b949b7cdd736e096ec69a272df4abc49f21b9"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.063188 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.063364 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.563337148 +0000 UTC m=+155.098451047 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.063560 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.063901 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.563889633 +0000 UTC m=+155.099003582 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.065868 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" event={"ID":"d5642370-ee34-4ee6-8ae8-0951768da987","Type":"ContainerStarted","Data":"b39ce4b52a59f4755f914d2909499964b4a8f7d978628b6c4ae86588e4aba497"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.066035 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" event={"ID":"d5642370-ee34-4ee6-8ae8-0951768da987","Type":"ContainerStarted","Data":"c3a0b396bc42ed0a62ea510d4b5128093ee40674a894974d1b0448bd1e0b8b26"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.067285 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gglqm" event={"ID":"1530383e-f6f4-47de-8302-dfe172a883e7","Type":"ContainerStarted","Data":"6b560d21a2d9b84fca82230069fab7e97e042ca8ec831ae9c9a6e82b40ba213c"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.072181 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-kmdn7" event={"ID":"7146e72d-adb4-4283-aad0-8ed4b6363be9","Type":"ContainerStarted","Data":"51f082e4f9bd4bf519abd00efc5816e41aa461d500a1853f53ed901925d7ea97"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.074204 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" event={"ID":"8fb86530-c6d5-4e68-960b-2eadac1c8973","Type":"ContainerStarted","Data":"39975fd4cab9ce336af34c75b39974887720fa2937ac6bc507ca241b757c6e9c"}
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.074578 5021 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-w8nsc container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" start-of-body=
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.074694 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" podUID="8fb86530-c6d5-4e68-960b-2eadac1c8973" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused"
Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.074649 5021 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-kn7v9 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
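Two probe-failure shapes recur in the entries above: "connect: connection refused" (nothing is listening on the probe port yet, typical in the first seconds after a ContainerStarted event) and the router's HTTP 500, whose healthz body lists [+]/[-] subchecks such as "[-]backend-http failed: reason withheld". The sketch below shows how a prober-style client would surface both cases; it is a standalone illustration, not kubelet's prober code, and the probed address is simply copied from the log.

// probe-sketch: distinguish a dial error from an HTTP healthz failure.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func probe(url string) {
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. Get "http://...": dial tcp ...: connect: connection refused
		fmt.Printf("probe failed: %v\n", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		// e.g. HTTP probe failed with statuscode: 500; body lists failing checks
		fmt.Printf("HTTP probe failed with statuscode: %d\n%s\n", resp.StatusCode, body)
		return
	}
	fmt.Println("probe ok")
}

func main() {
	probe("http://10.217.0.40:8080/healthz") // address taken from the log above
}

The "SyncLoop (probe)" status="ready" lines that follow are the other half of the cycle: once a container's healthz starts answering 200, the same probes flip from failure to ready.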
podUID="d8a36d28-fc2f-44fc-adca-fe218362ba3a" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.075063 5021 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-m2brn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.075126 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.086696 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-9fhn9" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.088328 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-phhx7" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.166641 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.166744 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.666728026 +0000 UTC m=+155.201841915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.168669 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.178667 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.678643654 +0000 UTC m=+155.213757743 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.179959 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" podStartSLOduration=131.17993787 podStartE2EDuration="2m11.17993787s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:53.177767018 +0000 UTC m=+154.712880917" watchObservedRunningTime="2026-01-21 15:26:53.17993787 +0000 UTC m=+154.715051759" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.270566 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.270673 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.77065505 +0000 UTC m=+155.305768939 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.270816 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.271124 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.771113923 +0000 UTC m=+155.306227812 (durationBeforeRetry 500ms). 
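
The pod_startup_latency_tracker record above is internally consistent: podStartSLOduration (131.17993787 s) is watchObservedRunningTime minus podCreationTimestamp, and because no image pull happened (the zeroed firstStartedPulling/lastFinishedPulling sentinels), it equals podStartE2EDuration ("2m11.17993787s"). A small Go check of that arithmetic, using the timestamps exactly as printed in the record:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-21 15:24:42 +0000 UTC")
	// Go's time.Parse accepts a fractional-seconds field on input even
	// when the layout omits it.
	observed, _ := time.Parse(layout, "2026-01-21 15:26:53.17993787 +0000 UTC")
	// Prints 131.17993787, matching podStartSLOduration in the record above.
	fmt.Println(observed.Sub(created).Seconds())
}
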
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.342941 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-jm4qh" podStartSLOduration=130.342898846 podStartE2EDuration="2m10.342898846s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:53.339702516 +0000 UTC m=+154.874816395" watchObservedRunningTime="2026-01-21 15:26:53.342898846 +0000 UTC m=+154.878012765" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.371693 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.371882 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.871855446 +0000 UTC m=+155.406969335 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.372080 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.372540 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.872519005 +0000 UTC m=+155.407632894 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.374147 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ntg54" podStartSLOduration=130.374129621 podStartE2EDuration="2m10.374129621s" podCreationTimestamp="2026-01-21 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:53.370770456 +0000 UTC m=+154.905884345" watchObservedRunningTime="2026-01-21 15:26:53.374129621 +0000 UTC m=+154.909243510" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.472953 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.473169 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.973131225 +0000 UTC m=+155.508245114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.473309 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.473607 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:53.973592349 +0000 UTC m=+155.508706238 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.574753 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.575024 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.074998101 +0000 UTC m=+155.610111990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.575448 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.575786 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.075771673 +0000 UTC m=+155.610885562 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.586735 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:53 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:53 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:53 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.586795 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.677117 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.677304 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.177272167 +0000 UTC m=+155.712386056 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.677495 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.677784 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.177770041 +0000 UTC m=+155.712883930 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.779837 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.780060 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.280028908 +0000 UTC m=+155.815142807 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.780138 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.780482 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.280465131 +0000 UTC m=+155.815579020 (durationBeforeRetry 500ms). 
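
Every MountDevice and TearDown failure in this stretch shares the single root cause stated in each message: the kubevirt.io.hostpath-provisioner driver has not yet registered with this kubelet, so the volume reconciler requeues both operations with the logged 500 ms durationBeforeRetry. A minimal client-go sketch of how one might confirm which drivers a node has registered (the CSINode object mirrors the per-node registration list); the kubeconfig path and the node name "crc" are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location; adjust for the cluster at hand.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// CSINode reflects the driver registrations the kubelet has accepted.
	node, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range node.Spec.Drivers {
		// kubevirt.io.hostpath-provisioner should appear here once registration completes.
		fmt.Println("registered CSI driver:", d.Name)
	}
}
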
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.881664 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.882128 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.382110509 +0000 UTC m=+155.917224398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.973482 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.973538 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.975285 5021 patch_prober.go:28] interesting pod/console-f9d7485db-92qbd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.975373 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-92qbd" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" Jan 21 15:26:53 crc kubenswrapper[5021]: I0121 15:26:53.983479 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:53 crc kubenswrapper[5021]: E0121 15:26:53.983809 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-21 15:26:54.483798 +0000 UTC m=+156.018911889 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.063551 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.063604 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.063609 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.063638 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.070002 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.084951 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.085115 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.585089239 +0000 UTC m=+156.120203118 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.085255 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.086883 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.58686761 +0000 UTC m=+156.121981499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.101066 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" event={"ID":"4d5503fc-0527-456d-b97d-7a455bdf3e7f","Type":"ContainerStarted","Data":"b6a472a2e3e7a18689dc53c16627d2d4c6eac0bbee8e1f68cae47638301bf618"} Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.187457 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.188232 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.68820105 +0000 UTC m=+156.223314949 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.188797 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.192336 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.692316747 +0000 UTC m=+156.227430736 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.197925 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-vpddf" podStartSLOduration=11.197895505 podStartE2EDuration="11.197895505s" podCreationTimestamp="2026-01-21 15:26:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:54.197210495 +0000 UTC m=+155.732324384" watchObservedRunningTime="2026-01-21 15:26:54.197895505 +0000 UTC m=+155.733009394" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.229458 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-64lkf" podStartSLOduration=132.229440598 podStartE2EDuration="2m12.229440598s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:54.227795521 +0000 UTC m=+155.762909420" watchObservedRunningTime="2026-01-21 15:26:54.229440598 +0000 UTC m=+155.764554487" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.290627 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.290973 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2026-01-21 15:26:54.79095034 +0000 UTC m=+156.326064229 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.392271 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.392685 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.892668212 +0000 UTC m=+156.427782101 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.493602 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.493839 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.993810466 +0000 UTC m=+156.528924365 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.493974 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.494290 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:54.99427669 +0000 UTC m=+156.529390579 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.586405 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:54 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:54 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:54 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.586472 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.595066 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.595381 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.095366964 +0000 UTC m=+156.630480853 (durationBeforeRetry 500ms). 
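
Most of the probe failures interleaved here are ordinary startup-phase noise: the router's startup probe returns 500 while [-]backend-http and [-]has-synced are still failing, and the console's startup probe is refused until its server binds 10.217.0.16:8443. A sketch of a corev1.Probe shaped like the console check implied by the log (GET /health on port 8443 over HTTPS); the period and threshold values are illustrative assumptions, not read from the pod spec:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Startup probe matching the console check seen in the log:
	// GET https://<pod-ip>:8443/health until it answers 200.
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/health",
				Port:   intstr.FromInt(8443),
				Scheme: corev1.URISchemeHTTPS,
			},
		},
		PeriodSeconds:    10, // illustrative, not taken from the pod spec
		FailureThreshold: 30, // illustrative
	}
	fmt.Printf("%+v\n", probe)
}
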
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.696350 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.696740 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.196723074 +0000 UTC m=+156.731836963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.791954 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.792477 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.794014 5021 patch_prober.go:28] interesting pod/apiserver-76f77b778f-g8wp8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.794060 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8" podUID="e72ac95a-ad13-408c-b595-9e983c185119" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.797090 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.797447 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-21 15:26:55.297429937 +0000 UTC m=+156.832543836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.898464 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:54 crc kubenswrapper[5021]: E0121 15:26:54.899815 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.399800536 +0000 UTC m=+156.934914425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.932771 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:54 crc kubenswrapper[5021]: I0121 15:26:54.932830 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.001419 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.001749 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.501704263 +0000 UTC m=+157.036818142 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.002560 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.003470 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.503453992 +0000 UTC m=+157.038567871 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.069960 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6sz7l"] Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.071032 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.076173 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.095350 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6sz7l"] Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.102137 5021 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-w8nsc container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.102271 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" podUID="8fb86530-c6d5-4e68-960b-2eadac1c8973" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.104526 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.104875 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.604859335 +0000 UTC m=+157.139973224 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.131845 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" event={"ID":"f0a0868f-a7c7-4bce-a9b5-855a11e2631e","Type":"ContainerStarted","Data":"005a666f43e1560e4b3942da3e7a71c861b500a846279100d91f23a63ddd08ff"} Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.206769 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.207336 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpf9d\" (UniqueName: \"kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.207485 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.207727 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.209314 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.709301743 +0000 UTC m=+157.244415632 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.253994 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"] Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.255248 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds2cq" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.258267 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.273405 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"] Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.309073 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.309449 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.80943057 +0000 UTC m=+157.344544469 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.309481 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.309578 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.309617 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpf9d\" (UniqueName: \"kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.310072 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.310260 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.335552 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpf9d\" (UniqueName: \"kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d\") pod \"community-operators-6sz7l\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.384736 5021 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.392715 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.410785 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.411145 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.411320 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.411690 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:55.911675806 +0000 UTC m=+157.446789695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.412018 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqpp9\" (UniqueName: \"kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.464846 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-29tp8"]
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.466034 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.512765 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.513033 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqpp9\" (UniqueName: \"kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.513090 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.513168 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.513677 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.513985 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.514173 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:56.014153528 +0000 UTC m=+157.549267417 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.546383 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29tp8"]
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.552521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqpp9\" (UniqueName: \"kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9\") pod \"certified-operators-ds2cq\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.571498 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.593924 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:26:55 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:26:55 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.594272 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.614067 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.614307 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fv27\" (UniqueName: \"kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.614404 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.614522 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.614995 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:56.114980284 +0000 UTC m=+157.650094173 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.647006 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.664250 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-629pj"]
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.665280 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.677996 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-629pj"]
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.684691 5021 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-wlk6l container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]log ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]etcd ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]etcd-readiness ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [-]informer-sync failed: reason withheld
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/max-in-flight-filter ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/openshift.io-StartUserInformer ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/openshift.io-StartOAuthInformer ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok
Jan 21 15:26:55 crc kubenswrapper[5021]: [+]shutdown ok
Jan 21 15:26:55 crc kubenswrapper[5021]: readyz check failed
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.684749 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l" podUID="549f101f-6acf-41be-9263-57bb5902cbd6" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.717505 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.720110 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.720304 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.720356 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fv27\" (UniqueName: \"kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.720871 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:56.220825123 +0000 UTC m=+157.755939022 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.722010 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.722427 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.734385 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-vpddf"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.771071 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fv27\" (UniqueName: \"kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27\") pod \"community-operators-29tp8\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") " pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.811390 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.823358 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt8s2\" (UniqueName: \"kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.823403 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.823425 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.823449 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.843002 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-21 15:26:56.342974723 +0000 UTC m=+157.878088622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zmqhz" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.903046 5021 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-21T15:26:55.385094453Z","Handler":null,"Name":""}
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.925830 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.926156 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt8s2\" (UniqueName: \"kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.926200 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.926228 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.926665 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: E0121 15:26:55.926732 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-21 15:26:56.426717305 +0000 UTC m=+157.961831194 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.927223 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.963215 5021 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.963256 5021 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.984995 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt8s2\" (UniqueName: \"kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2\") pod \"certified-operators-629pj\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:55 crc kubenswrapper[5021]: I0121 15:26:55.992240 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.035628 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.063197 5021 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.063245 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.075295 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kn7v9" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.192084 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zmqhz\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.204062 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" event={"ID":"f0a0868f-a7c7-4bce-a9b5-855a11e2631e","Type":"ContainerStarted","Data":"43e29439b2eae473af10a941e17e7a733f9248585ea91a9052b02d666de54bd2"} Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.238333 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.252793 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.347272 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.439194 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6sz7l"] Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.489365 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"] Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.578341 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4459k" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.636865 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-w8nsc" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.652816 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.703135 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:56 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:56 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:56 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.703199 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.771015 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.817232 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-629pj"] Jan 21 15:26:56 crc kubenswrapper[5021]: I0121 15:26:56.945567 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.065439 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.066575 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.066989 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.070933 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29tp8"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.099419 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.169125 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.169182 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.169221 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrml2\" (UniqueName: \"kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.218541 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerStarted","Data":"01d512f63b37766e2432a5404c5d4339c207159d9fb400a6ba96abcc20e80ca5"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.232093 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" event={"ID":"e1a2d469-35da-4253-b3a5-057b68c4d68b","Type":"ContainerStarted","Data":"9008e9eaf3bd6a0b6babf4bc3deb59e9f130f9526ac4d4540ffe77c532d8eb45"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.244306 5021 generic.go:334] "Generic (PLEG): container finished" podID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerID="7d36ec51cd1407d0e2c6d6ab72c7a870318fc7ed77eecb119724bdd8a994eaf2" exitCode=0 Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.244416 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerDied","Data":"7d36ec51cd1407d0e2c6d6ab72c7a870318fc7ed77eecb119724bdd8a994eaf2"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.244445 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerStarted","Data":"dab72743b7fa95fb291fe1f9f42b74b4558e352fb1e665183b04d6348396a5d7"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.246812 5021 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.249325 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" event={"ID":"f0a0868f-a7c7-4bce-a9b5-855a11e2631e","Type":"ContainerStarted","Data":"34e072dfa1ae1114eab9fea07fa25e3710160f6c3964ccfdac02366e6d9f711b"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.251165 5021 generic.go:334] "Generic (PLEG): container finished" podID="85c51757-e7f5-487f-b873-a543118733b6" containerID="585f1d340aa37dd148d32e3cd98d6c3809a943cdca402faa615f801d805849bc" exitCode=0 Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.251353 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerDied","Data":"585f1d340aa37dd148d32e3cd98d6c3809a943cdca402faa615f801d805849bc"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.251424 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerStarted","Data":"c76c0ede83f8f3bc1867305fab23327c14d4623e500ddb7b78156582d1e28c26"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.253793 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerStarted","Data":"d43a3db41dd53a96e2b70b862c39ca12ce7e9c12ef7a69cdb8cdc2d112fcdd0e"} Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.271856 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.272007 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrml2\" (UniqueName: \"kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.272097 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.272894 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.273144 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 
15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.310532 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrml2\" (UniqueName: \"kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2\") pod \"redhat-marketplace-g85c4\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.311371 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pkmbf" podStartSLOduration=14.311354426 podStartE2EDuration="14.311354426s" podCreationTimestamp="2026-01-21 15:26:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:57.288380164 +0000 UTC m=+158.823494063" watchObservedRunningTime="2026-01-21 15:26:57.311354426 +0000 UTC m=+158.846468315" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.449231 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.450637 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.462767 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"] Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.575635 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2q9v\" (UniqueName: \"kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.575706 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.575742 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.583741 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:57 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:57 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:57 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.583867 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed 
with statuscode: 500" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.612407 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.677542 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2q9v\" (UniqueName: \"kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.677616 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.677653 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.678298 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.678592 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.710444 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2q9v\" (UniqueName: \"kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v\") pod \"redhat-marketplace-xjfsb\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") " pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.911252 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xjfsb" Jan 21 15:26:57 crc kubenswrapper[5021]: I0121 15:26:57.941501 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"] Jan 21 15:26:57 crc kubenswrapper[5021]: W0121 15:26:57.955238 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7b20493_5547_4a23_a8c4_411119736b50.slice/crio-c2194e882d6879c2fdf51a7ea600619872a58190c95eecc7cc4b2ae0aaa6e5ea WatchSource:0}: Error finding container c2194e882d6879c2fdf51a7ea600619872a58190c95eecc7cc4b2ae0aaa6e5ea: Status 404 returned error can't find the container with id c2194e882d6879c2fdf51a7ea600619872a58190c95eecc7cc4b2ae0aaa6e5ea Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.247393 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.249354 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.253877 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.261072 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.261862 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.274639 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.275087 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.277397 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.287895 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.324960 5021 generic.go:334] "Generic (PLEG): container finished" podID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerID="326dfcbf0f6f581ed121f94ab91cff85323b3f3018ec8a0094a19cd3221a6621" exitCode=0 Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.325111 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerDied","Data":"326dfcbf0f6f581ed121f94ab91cff85323b3f3018ec8a0094a19cd3221a6621"} Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.344895 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.355254 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" event={"ID":"e1a2d469-35da-4253-b3a5-057b68c4d68b","Type":"ContainerStarted","Data":"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e"} Jan 21 15:26:58 crc 
kubenswrapper[5021]: I0121 15:26:58.356115 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.391566 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsjjr\" (UniqueName: \"kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.391641 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.391685 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.391703 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.391720 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.400278 5021 generic.go:334] "Generic (PLEG): container finished" podID="af36dabe-9fde-4042-b317-6568e27fee70" containerID="f8f4ae4a2e0af3c461b2d795527c54b4a7bfd7d0450c0d9f211bf9a3fb0d98f7" exitCode=0 Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.400395 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerDied","Data":"f8f4ae4a2e0af3c461b2d795527c54b4a7bfd7d0450c0d9f211bf9a3fb0d98f7"} Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.422177 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerStarted","Data":"ae92eb1c218afa919addafa83cd99eeca12614a69420a29f130b3a3337127886"} Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.422225 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerStarted","Data":"c2194e882d6879c2fdf51a7ea600619872a58190c95eecc7cc4b2ae0aaa6e5ea"} Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.483858 5021 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" podStartSLOduration=136.483838536 podStartE2EDuration="2m16.483838536s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:26:58.478648889 +0000 UTC m=+160.013762768" watchObservedRunningTime="2026-01-21 15:26:58.483838536 +0000 UTC m=+160.018952425" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.493278 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsjjr\" (UniqueName: \"kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.493383 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.493479 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.493505 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.493528 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.495265 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.495529 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.495620 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: 
\"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.538600 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsjjr\" (UniqueName: \"kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr\") pod \"redhat-operators-74dm9\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.538653 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.584933 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.586141 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:58 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:58 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:58 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.586194 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.647520 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.651473 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.672402 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.697778 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-utilities\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.697857 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp9n2\" (UniqueName: \"kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.697966 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.707229 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.799767 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.799344 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.800082 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-utilities\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.800189 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp9n2\" (UniqueName: \"kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.800748 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-utilities\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " 
pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.827339 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp9n2\" (UniqueName: \"kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2\") pod \"redhat-operators-svghk\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:58 crc kubenswrapper[5021]: I0121 15:26:58.978013 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.042056 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"] Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.066164 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 21 15:26:59 crc kubenswrapper[5021]: W0121 15:26:59.096169 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2e95c64_dd6b_4071_8cec_2832ccc612fe.slice/crio-885e2bbd87c4240b85a684eb50733081c2f648681a81737c1939dd9d4dc4d3b9 WatchSource:0}: Error finding container 885e2bbd87c4240b85a684eb50733081c2f648681a81737c1939dd9d4dc4d3b9: Status 404 returned error can't find the container with id 885e2bbd87c4240b85a684eb50733081c2f648681a81737c1939dd9d4dc4d3b9 Jan 21 15:26:59 crc kubenswrapper[5021]: W0121 15:26:59.097467 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7e83d702_b89d_4f38_941e_1531fb0d868f.slice/crio-03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638 WatchSource:0}: Error finding container 03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638: Status 404 returned error can't find the container with id 03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638 Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.588328 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 21 15:26:59 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld Jan 21 15:26:59 crc kubenswrapper[5021]: [+]process-running ok Jan 21 15:26:59 crc kubenswrapper[5021]: healthz check failed Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.588882 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.590873 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:26:59 crc kubenswrapper[5021]: W0121 15:26:59.634896 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02187944_ab37_42f4_898b_eced0c5a1059.slice/crio-4eef0cfe17ff94f86b1eea1ff09a6a10420b6687fe32c1d85a805d71e8c43b2e WatchSource:0}: Error finding container 4eef0cfe17ff94f86b1eea1ff09a6a10420b6687fe32c1d85a805d71e8c43b2e: Status 404 returned error can't find the container with id 
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.790519 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7e83d702-b89d-4f38-941e-1531fb0d868f","Type":"ContainerStarted","Data":"03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638"}
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.803190 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerStarted","Data":"885e2bbd87c4240b85a684eb50733081c2f648681a81737c1939dd9d4dc4d3b9"}
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.814114 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8"
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.815447 5021 generic.go:334] "Generic (PLEG): container finished" podID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerID="8b011be0440def67096984fb81ba6d2bddd3489480ed644485973b2acf0620b3" exitCode=0
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.815558 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerDied","Data":"8b011be0440def67096984fb81ba6d2bddd3489480ed644485973b2acf0620b3"}
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.815590 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerStarted","Data":"5884b0f7cb4155b9bfd89d647dedf620f00271a9f9254a030fc28b391289f87f"}
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.824477 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-g8wp8"
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.839777 5021 generic.go:334] "Generic (PLEG): container finished" podID="f7b20493-5547-4a23-a8c4-411119736b50" containerID="ae92eb1c218afa919addafa83cd99eeca12614a69420a29f130b3a3337127886" exitCode=0
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.839882 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerDied","Data":"ae92eb1c218afa919addafa83cd99eeca12614a69420a29f130b3a3337127886"}
Jan 21 15:26:59 crc kubenswrapper[5021]: I0121 15:26:59.947456 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlk6l"
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.603245 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:00 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:00 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:00 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.603895 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.886213 5021 generic.go:334] "Generic (PLEG): container finished" podID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerID="90dd51da92b6899797d9b59aa29c7f1b1b1477da349c5e6277599d062cf1d118" exitCode=0
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.886303 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerDied","Data":"90dd51da92b6899797d9b59aa29c7f1b1b1477da349c5e6277599d062cf1d118"}
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.987131 5021 generic.go:334] "Generic (PLEG): container finished" podID="71337f59-d4e5-47da-9d8e-759bd17cfdc3" containerID="a5bb51c915ed8831067aabe10257efffba87f9dc2079eb6e60ead96e294df68a" exitCode=0
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.987208 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" event={"ID":"71337f59-d4e5-47da-9d8e-759bd17cfdc3","Type":"ContainerDied","Data":"a5bb51c915ed8831067aabe10257efffba87f9dc2079eb6e60ead96e294df68a"}
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.993327 5021 generic.go:334] "Generic (PLEG): container finished" podID="02187944-ab37-42f4-898b-eced0c5a1059" containerID="601765261def215bd28bfaccff36ff0b7e1bcd81a4a9d09a0f29521a4ff1997b" exitCode=0
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.993457 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerDied","Data":"601765261def215bd28bfaccff36ff0b7e1bcd81a4a9d09a0f29521a4ff1997b"}
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.993486 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerStarted","Data":"4eef0cfe17ff94f86b1eea1ff09a6a10420b6687fe32c1d85a805d71e8c43b2e"}
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.998977 5021 generic.go:334] "Generic (PLEG): container finished" podID="7e83d702-b89d-4f38-941e-1531fb0d868f" containerID="9e3112472add2e361f4fe660832bd2e037a77bdb8c4b79fcdf74bb7d98bf131b" exitCode=0
Jan 21 15:27:00 crc kubenswrapper[5021]: I0121 15:27:00.999848 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7e83d702-b89d-4f38-941e-1531fb0d868f","Type":"ContainerDied","Data":"9e3112472add2e361f4fe660832bd2e037a77bdb8c4b79fcdf74bb7d98bf131b"}
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.158412 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.159999 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.162642 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.170084 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.171687 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.286974 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.287056 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.389716 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.389879 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.390067 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.436124 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.561363 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.583502 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:01 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:01 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:01 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.583574 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:01 crc kubenswrapper[5021]: I0121 15:27:01.738034 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-vpddf"
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.543333 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.551769 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.586970 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:02 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:02 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:02 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.587055 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.591437 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.721121 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume\") pod \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") "
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.721194 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir\") pod \"7e83d702-b89d-4f38-941e-1531fb0d868f\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") "
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.721367 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume\") pod \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") "
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.721399 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access\") pod \"7e83d702-b89d-4f38-941e-1531fb0d868f\" (UID: \"7e83d702-b89d-4f38-941e-1531fb0d868f\") "
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.721453 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pphp\" (UniqueName: \"kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp\") pod \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\" (UID: \"71337f59-d4e5-47da-9d8e-759bd17cfdc3\") "
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.723608 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7e83d702-b89d-4f38-941e-1531fb0d868f" (UID: "7e83d702-b89d-4f38-941e-1531fb0d868f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.724610 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume" (OuterVolumeSpecName: "config-volume") pod "71337f59-d4e5-47da-9d8e-759bd17cfdc3" (UID: "71337f59-d4e5-47da-9d8e-759bd17cfdc3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.733742 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp" (OuterVolumeSpecName: "kube-api-access-7pphp") pod "71337f59-d4e5-47da-9d8e-759bd17cfdc3" (UID: "71337f59-d4e5-47da-9d8e-759bd17cfdc3"). InnerVolumeSpecName "kube-api-access-7pphp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.733962 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7e83d702-b89d-4f38-941e-1531fb0d868f" (UID: "7e83d702-b89d-4f38-941e-1531fb0d868f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.734812 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "71337f59-d4e5-47da-9d8e-759bd17cfdc3" (UID: "71337f59-d4e5-47da-9d8e-759bd17cfdc3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.822855 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/71337f59-d4e5-47da-9d8e-759bd17cfdc3-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.822901 5021 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7e83d702-b89d-4f38-941e-1531fb0d868f-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.822929 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/71337f59-d4e5-47da-9d8e-759bd17cfdc3-config-volume\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.822969 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e83d702-b89d-4f38-941e-1531fb0d868f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:02 crc kubenswrapper[5021]: I0121 15:27:02.822980 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pphp\" (UniqueName: \"kubernetes.io/projected/71337f59-d4e5-47da-9d8e-759bd17cfdc3-kube-api-access-7pphp\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.058759 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf" event={"ID":"71337f59-d4e5-47da-9d8e-759bd17cfdc3","Type":"ContainerDied","Data":"3cd7b7671acfb344981d740d2faad191af2766683e3769f26297e1db92a5246a"}
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.058812 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cd7b7671acfb344981d740d2faad191af2766683e3769f26297e1db92a5246a"
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.058807 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.100310 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7e83d702-b89d-4f38-941e-1531fb0d868f","Type":"ContainerDied","Data":"03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638"}
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.100365 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03d0613e9b5af26c1968c79dae0d0967984248dd8f647e19e8fcc5196b2cd638"
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.100344 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.108996 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b30b3156-3b0e-4801-b561-e88b6593d6ca","Type":"ContainerStarted","Data":"1c8d1ff86a2e217e387c4a8c9a4d15b8143a1e1596f9f2ab8d0ac11206acde94"}
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.588019 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:03 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:03 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:03 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.588483 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.973187 5021 patch_prober.go:28] interesting pod/console-f9d7485db-92qbd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Jan 21 15:27:03 crc kubenswrapper[5021]: I0121 15:27:03.973254 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-92qbd" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused"
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.063625 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.063708 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused"
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.063850 5021 patch_prober.go:28] interesting pod/downloads-7954f5f757-vzlcs container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body=
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.063954 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vzlcs" podUID="1eae8258-0ffa-4aad-9ac4-747259f4cae0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused"
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.580752 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:04 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:04 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:04 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.580812 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.959380 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:27:04 crc kubenswrapper[5021]: I0121 15:27:04.980934 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb60592c-6770-457b-b2ae-2c6c8f2a4149-metrics-certs\") pod \"network-metrics-daemon-xtd2p\" (UID: \"cb60592c-6770-457b-b2ae-2c6c8f2a4149\") " pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:27:05 crc kubenswrapper[5021]: I0121 15:27:05.161312 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-xtd2p"
Jan 21 15:27:05 crc kubenswrapper[5021]: I0121 15:27:05.590750 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 21 15:27:05 crc kubenswrapper[5021]: [-]has-synced failed: reason withheld
Jan 21 15:27:05 crc kubenswrapper[5021]: [+]process-running ok
Jan 21 15:27:05 crc kubenswrapper[5021]: healthz check failed
Jan 21 15:27:05 crc kubenswrapper[5021]: I0121 15:27:05.590837 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 21 15:27:05 crc kubenswrapper[5021]: I0121 15:27:05.744494 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-xtd2p"]
Jan 21 15:27:05 crc kubenswrapper[5021]: W0121 15:27:05.775015 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb60592c_6770_457b_b2ae_2c6c8f2a4149.slice/crio-dd6d0b7212eaf5bcb5d460cb293a9380a8e5d3e80f5d3f981b679d70971c4e73 WatchSource:0}: Error finding container dd6d0b7212eaf5bcb5d460cb293a9380a8e5d3e80f5d3f981b679d70971c4e73: Status 404 returned error can't find the container with id dd6d0b7212eaf5bcb5d460cb293a9380a8e5d3e80f5d3f981b679d70971c4e73
Jan 21 15:27:06 crc kubenswrapper[5021]: I0121 15:27:06.183496 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b30b3156-3b0e-4801-b561-e88b6593d6ca","Type":"ContainerStarted","Data":"b18cdcf6505e93bdd9611dda019d6e4b03ed350c0ba245ed1b80d1d187e14e4a"}
Jan 21 15:27:06 crc kubenswrapper[5021]: I0121 15:27:06.194759 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" event={"ID":"cb60592c-6770-457b-b2ae-2c6c8f2a4149","Type":"ContainerStarted","Data":"dd6d0b7212eaf5bcb5d460cb293a9380a8e5d3e80f5d3f981b679d70971c4e73"}
Jan 21 15:27:06 crc kubenswrapper[5021]: I0121 15:27:06.585896 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-4459k"
Jan 21 15:27:06 crc kubenswrapper[5021]: I0121 15:27:06.596122 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4459k"
Jan 21 15:27:06 crc kubenswrapper[5021]: I0121 15:27:06.618732 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=5.61870212 podStartE2EDuration="5.61870212s" podCreationTimestamp="2026-01-21 15:27:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:27:06.206226165 +0000 UTC m=+167.741340074" watchObservedRunningTime="2026-01-21 15:27:06.61870212 +0000 UTC m=+168.153816029"
Jan 21 15:27:07 crc kubenswrapper[5021]: I0121 15:27:07.249719 5021 generic.go:334] "Generic (PLEG): container finished" podID="b30b3156-3b0e-4801-b561-e88b6593d6ca" containerID="b18cdcf6505e93bdd9611dda019d6e4b03ed350c0ba245ed1b80d1d187e14e4a" exitCode=0
Jan 21 15:27:07 crc kubenswrapper[5021]: I0121 15:27:07.249801 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b30b3156-3b0e-4801-b561-e88b6593d6ca","Type":"ContainerDied","Data":"b18cdcf6505e93bdd9611dda019d6e4b03ed350c0ba245ed1b80d1d187e14e4a"}
Jan 21 15:27:07 crc kubenswrapper[5021]: I0121 15:27:07.260339 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" event={"ID":"cb60592c-6770-457b-b2ae-2c6c8f2a4149","Type":"ContainerStarted","Data":"5832532a91351ea66852e8bf9834feb1b3d830d4af180c15441722f72c3bba19"}
Jan 21 15:27:08 crc kubenswrapper[5021]: I0121 15:27:08.276810 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-xtd2p" event={"ID":"cb60592c-6770-457b-b2ae-2c6c8f2a4149","Type":"ContainerStarted","Data":"e442145cabed0d2b33d4451432099c4aa6e5718ef700314dbd1cabbf70312e0b"}
Jan 21 15:27:08 crc kubenswrapper[5021]: I0121 15:27:08.305427 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-xtd2p" podStartSLOduration=146.305406456 podStartE2EDuration="2m26.305406456s" podCreationTimestamp="2026-01-21 15:24:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:27:08.304345226 +0000 UTC m=+169.839459125" watchObservedRunningTime="2026-01-21 15:27:08.305406456 +0000 UTC m=+169.840520345"
Jan 21 15:27:12 crc kubenswrapper[5021]: I0121 15:27:12.357570 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:27:12 crc kubenswrapper[5021]: I0121 15:27:12.358206 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:27:14 crc kubenswrapper[5021]: I0121 15:27:14.082205 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-vzlcs"
Jan 21 15:27:14 crc kubenswrapper[5021]: I0121 15:27:14.372343 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-92qbd"
Jan 21 15:27:14 crc kubenswrapper[5021]: I0121 15:27:14.377243 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-92qbd"
Jan 21 15:27:16 crc kubenswrapper[5021]: I0121 15:27:16.356215 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz"
Jan 21 15:27:25 crc kubenswrapper[5021]: I0121 15:27:25.716416 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.120230 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
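The two pod_startup_latency_tracker figures above are the gap between podCreationTimestamp and watchObservedRunningTime. A quick check in Go reproduces both durations exactly:

// sloduration.go: verify the podStartSLOduration arithmetic from the log.
package main

import (
	"fmt"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05.999999999 -0700 MST"

	// revision-pruner-8-crc: created 15:27:01, observed running 15:27:06.61870212
	created, _ := time.Parse(layout, "2026-01-21 15:27:01 +0000 UTC")
	observed, _ := time.Parse(layout, "2026-01-21 15:27:06.61870212 +0000 UTC")
	fmt.Println(observed.Sub(created)) // 5.61870212s, matching podStartSLOduration=5.61870212

	// network-metrics-daemon-xtd2p: created 15:24:42, observed running 15:27:08.305406456
	created2, _ := time.Parse(layout, "2026-01-21 15:24:42 +0000 UTC")
	observed2, _ := time.Parse(layout, "2026-01-21 15:27:08.305406456 +0000 UTC")
	fmt.Println(observed2.Sub(created2)) // 2m26.305406456s, matching podStartE2EDuration="2m26.305406456s"
}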
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.243111 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access\") pod \"b30b3156-3b0e-4801-b561-e88b6593d6ca\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") "
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.243195 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir\") pod \"b30b3156-3b0e-4801-b561-e88b6593d6ca\" (UID: \"b30b3156-3b0e-4801-b561-e88b6593d6ca\") "
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.243469 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b30b3156-3b0e-4801-b561-e88b6593d6ca" (UID: "b30b3156-3b0e-4801-b561-e88b6593d6ca"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.251423 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b30b3156-3b0e-4801-b561-e88b6593d6ca" (UID: "b30b3156-3b0e-4801-b561-e88b6593d6ca"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.345184 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b30b3156-3b0e-4801-b561-e88b6593d6ca-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.345226 5021 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b30b3156-3b0e-4801-b561-e88b6593d6ca-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.361880 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-7t8pz"
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.399822 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"b30b3156-3b0e-4801-b561-e88b6593d6ca","Type":"ContainerDied","Data":"1c8d1ff86a2e217e387c4a8c9a4d15b8143a1e1596f9f2ab8d0ac11206acde94"}
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.399888 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c8d1ff86a2e217e387c4a8c9a4d15b8143a1e1596f9f2ab8d0ac11206acde94"
Jan 21 15:27:26 crc kubenswrapper[5021]: I0121 15:27:26.399960 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.341796 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 21 15:27:37 crc kubenswrapper[5021]: E0121 15:27:37.342676 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e83d702-b89d-4f38-941e-1531fb0d868f" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342692 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e83d702-b89d-4f38-941e-1531fb0d868f" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: E0121 15:27:37.342712 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b30b3156-3b0e-4801-b561-e88b6593d6ca" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342720 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b30b3156-3b0e-4801-b561-e88b6593d6ca" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: E0121 15:27:37.342741 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71337f59-d4e5-47da-9d8e-759bd17cfdc3" containerName="collect-profiles"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342753 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="71337f59-d4e5-47da-9d8e-759bd17cfdc3" containerName="collect-profiles"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342881 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e83d702-b89d-4f38-941e-1531fb0d868f" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342900 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b30b3156-3b0e-4801-b561-e88b6593d6ca" containerName="pruner"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.342932 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="71337f59-d4e5-47da-9d8e-759bd17cfdc3" containerName="collect-profiles"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.343419 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
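The RemoveStaleState lines above fire when a new pod is admitted: the cpu and memory managers drop assignments held for containers of pods that no longer exist (here, the finished pruner and collect-profiles pods). An illustrative sketch of that bookkeeping, not kubelet's actual data structures:

// stalestate.go: prune per-container resource assignments for departed pods.
package main

import "fmt"

// assignments maps podUID -> containerName -> assigned CPUSet (a string here for brevity).
type assignments map[string]map[string]string

func (a assignments) removeStale(active map[string]bool) {
	for podUID, containers := range a {
		if !active[podUID] {
			for name := range containers {
				fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
			}
			delete(a, podUID) // corresponds to "Deleted CPUSet assignment"
		}
	}
}

func main() {
	a := assignments{"7e83d702-b89d-4f38-941e-1531fb0d868f": {"pruner": "0-3"}}
	a.removeStale(map[string]bool{}) // no active pods: the stale entry is pruned
}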
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.346940 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.347141 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.351001 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.513868 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.514363 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.616046 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.616852 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:37 crc kubenswrapper[5021]: I0121 15:27:37.617060 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:38 crc kubenswrapper[5021]: I0121 15:27:38.138754 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:38 crc kubenswrapper[5021]: E0121 15:27:38.327731 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 21 15:27:38 crc kubenswrapper[5021]: E0121 15:27:38.328017 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vpf9d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6sz7l_openshift-marketplace(85c51757-e7f5-487f-b873-a543118733b6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 21 15:27:38 crc kubenswrapper[5021]: E0121 15:27:38.330092 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6sz7l" podUID="85c51757-e7f5-487f-b873-a543118733b6"
Jan 21 15:27:38 crc kubenswrapper[5021]: I0121 15:27:38.437460 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.357643 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.358280 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.747317 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.748191 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.754617 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.895262 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.895360 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.895402 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.996801 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.997102 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.997212 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.997351 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:42 crc kubenswrapper[5021]: I0121 15:27:42.996975 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:43 crc kubenswrapper[5021]: I0121 15:27:43.018108 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access\") pod \"installer-9-crc\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:43 crc kubenswrapper[5021]: I0121 15:27:43.066175 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:27:43 crc kubenswrapper[5021]: E0121 15:27:43.740580 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-6sz7l" podUID="85c51757-e7f5-487f-b873-a543118733b6"
Jan 21 15:27:43 crc kubenswrapper[5021]: E0121 15:27:43.875845 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Jan 21 15:27:43 crc kubenswrapper[5021]: E0121 15:27:43.876070 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gp9n2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-svghk_openshift-marketplace(02187944-ab37-42f4-898b-eced0c5a1059): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 21 15:27:43 crc kubenswrapper[5021]: E0121 15:27:43.877367 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-svghk" podUID="02187944-ab37-42f4-898b-eced0c5a1059"
Jan 21 15:27:44 crc kubenswrapper[5021]: E0121 15:27:44.204900 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 21 15:27:44 crc kubenswrapper[5021]: E0121 15:27:44.205097 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gsjjr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-74dm9_openshift-marketplace(e2e95c64-dd6b-4071-8cec-2832ccc612fe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 15:27:44 crc kubenswrapper[5021]: E0121 15:27:44.206254 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-74dm9" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.323601 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-74dm9" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.323792 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-svghk" podUID="02187944-ab37-42f4-898b-eced0c5a1059" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.387516 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.387703 5021 kuberuntime_manager.go:1274] 
"Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cqpp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-ds2cq_openshift-marketplace(c3b8b814-c3f4-4494-a9c4-f08dc74d895f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.390190 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-ds2cq" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.556140 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.556598 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8fv27,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-29tp8_openshift-marketplace(af36dabe-9fde-4042-b317-6568e27fee70): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 21 15:27:51 crc kubenswrapper[5021]: E0121 15:27:51.558404 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-29tp8" podUID="af36dabe-9fde-4042-b317-6568e27fee70" Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.123852 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-ds2cq" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.123869 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-29tp8" podUID="af36dabe-9fde-4042-b317-6568e27fee70" Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.184202 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.184395 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.184395 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qrml2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-g85c4_openshift-marketplace(f7b20493-5547-4a23-a8c4-411119736b50): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.185631 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-g85c4" podUID="f7b20493-5547-4a23-a8c4-411119736b50"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.218049 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.218641 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bt8s2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-629pj_openshift-marketplace(b2566bd0-1929-44cb-93b4-f09cc52bf852): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.220094 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-629pj" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.227690 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.227810 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p2q9v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xjfsb_openshift-marketplace(6dacec5e-eca8-4362-82e1-95c571054d9d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.228952 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xjfsb" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d"
Jan 21 15:27:53 crc kubenswrapper[5021]: I0121 15:27:53.413888 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 21 15:27:53 crc kubenswrapper[5021]: I0121 15:27:53.543644 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 21 15:27:53 crc kubenswrapper[5021]: I0121 15:27:53.551698 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a9fdb38-edc6-4315-a91c-9d8489695e24","Type":"ContainerStarted","Data":"f8b209fe15db0a8e419900b6e2120eacf73824b72b3fdfcb0e9aedf95dcfe285"}
Jan 21 15:27:53 crc kubenswrapper[5021]: W0121 15:27:53.554110 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod9e49c7f2_932a_44a7_950e_0e71ba1cfc1a.slice/crio-8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d WatchSource:0}: Error finding container 8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d: Status 404 returned error can't find the container with id 8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.554327 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-629pj" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.554404 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xjfsb" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d"
Jan 21 15:27:53 crc kubenswrapper[5021]: E0121 15:27:53.555555 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-g85c4" podUID="f7b20493-5547-4a23-a8c4-411119736b50"
Jan 21 15:27:54 crc kubenswrapper[5021]: I0121 15:27:54.558208 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a9fdb38-edc6-4315-a91c-9d8489695e24","Type":"ContainerStarted","Data":"5ab8ef4a9e4538f88456201fe6ea5cb38a6766ab02bb01eb73e844216d336304"}
Jan 21 15:27:54 crc kubenswrapper[5021]: I0121 15:27:54.560017 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a","Type":"ContainerStarted","Data":"8a59ccf69b5cbf30247767927958ba22981841113c1f373624b8af2431df96fd"}
Jan 21 15:27:54 crc kubenswrapper[5021]: I0121 15:27:54.560048 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a","Type":"ContainerStarted","Data":"8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d"}
Jan 21 15:27:54 crc kubenswrapper[5021]: I0121 15:27:54.591221 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=17.591200218 podStartE2EDuration="17.591200218s" podCreationTimestamp="2026-01-21 15:27:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:27:54.589683123 +0000 UTC m=+216.124797012" watchObservedRunningTime="2026-01-21 15:27:54.591200218 +0000 UTC m=+216.126314107"
Jan 21 15:27:54 crc kubenswrapper[5021]: I0121 15:27:54.594310 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=12.594295043 podStartE2EDuration="12.594295043s" podCreationTimestamp="2026-01-21 15:27:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:27:54.574130823 +0000 UTC m=+216.109244712" watchObservedRunningTime="2026-01-21 15:27:54.594295043 +0000 UTC m=+216.129408932"
Jan 21 15:27:55 crc kubenswrapper[5021]: I0121 15:27:55.566950 5021 generic.go:334] "Generic (PLEG): container finished" podID="9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" containerID="8a59ccf69b5cbf30247767927958ba22981841113c1f373624b8af2431df96fd" exitCode=0
Jan 21 15:27:55 crc kubenswrapper[5021]: I0121 15:27:55.567005 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a","Type":"ContainerDied","Data":"8a59ccf69b5cbf30247767927958ba22981841113c1f373624b8af2431df96fd"}
container finished" podID="85c51757-e7f5-487f-b873-a543118733b6" containerID="03389cea96dde3a168d28bc41734f72658b38df0a300d504cf16bcfa82e065c2" exitCode=0 Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.575129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerDied","Data":"03389cea96dde3a168d28bc41734f72658b38df0a300d504cf16bcfa82e065c2"} Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.839286 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.940427 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir\") pod \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.940542 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access\") pod \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\" (UID: \"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a\") " Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.941462 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" (UID: "9e49c7f2-932a-44a7-950e-0e71ba1cfc1a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:27:56 crc kubenswrapper[5021]: I0121 15:27:56.946062 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" (UID: "9e49c7f2-932a-44a7-950e-0e71ba1cfc1a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.041565 5021 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.041603 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e49c7f2-932a-44a7-950e-0e71ba1cfc1a-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.582625 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"9e49c7f2-932a-44a7-950e-0e71ba1cfc1a","Type":"ContainerDied","Data":"8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d"} Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.582668 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.582919 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e4d7234707e5b58c6343c2cb9cd5161b67c92b0225955ab8ffaa236b2d57c8d" Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.601177 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerStarted","Data":"25209e10f6a6ebcd7ff92784ba20d40a2ab48f9b4460622c55d38d8d1527064b"} Jan 21 15:27:57 crc kubenswrapper[5021]: I0121 15:27:57.618789 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6sz7l" podStartSLOduration=2.499418675 podStartE2EDuration="1m2.61877386s" podCreationTimestamp="2026-01-21 15:26:55 +0000 UTC" firstStartedPulling="2026-01-21 15:26:57.253784294 +0000 UTC m=+158.788898183" lastFinishedPulling="2026-01-21 15:27:57.373139489 +0000 UTC m=+218.908253368" observedRunningTime="2026-01-21 15:27:57.617557923 +0000 UTC m=+219.152671832" watchObservedRunningTime="2026-01-21 15:27:57.61877386 +0000 UTC m=+219.153887749" Jan 21 15:28:04 crc kubenswrapper[5021]: I0121 15:28:04.637945 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerStarted","Data":"35fcc96f3effb134c219b8e5a85910a1fc6fc3dfc044648f853220d3227668c9"} Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.393180 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.393581 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.455382 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.646546 5021 generic.go:334] "Generic (PLEG): container finished" podID="02187944-ab37-42f4-898b-eced0c5a1059" containerID="b14a64342a899e361781fffb52baa1f918dc0ed5e15199e860bc597aaa53e9b0" exitCode=0 Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.646726 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerDied","Data":"b14a64342a899e361781fffb52baa1f918dc0ed5e15199e860bc597aaa53e9b0"} Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.653951 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerStarted","Data":"3580652171defab3c78557cec3b6cd1e23bf72d49dff3a474eb5ac956dde65b3"} Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.657214 5021 generic.go:334] "Generic (PLEG): container finished" podID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerID="35fcc96f3effb134c219b8e5a85910a1fc6fc3dfc044648f853220d3227668c9" exitCode=0 Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.657460 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" 
event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerDied","Data":"35fcc96f3effb134c219b8e5a85910a1fc6fc3dfc044648f853220d3227668c9"} Jan 21 15:28:05 crc kubenswrapper[5021]: I0121 15:28:05.703212 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6sz7l" Jan 21 15:28:06 crc kubenswrapper[5021]: I0121 15:28:06.664969 5021 generic.go:334] "Generic (PLEG): container finished" podID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerID="3580652171defab3c78557cec3b6cd1e23bf72d49dff3a474eb5ac956dde65b3" exitCode=0 Jan 21 15:28:06 crc kubenswrapper[5021]: I0121 15:28:06.665049 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerDied","Data":"3580652171defab3c78557cec3b6cd1e23bf72d49dff3a474eb5ac956dde65b3"} Jan 21 15:28:07 crc kubenswrapper[5021]: I0121 15:28:07.675930 5021 generic.go:334] "Generic (PLEG): container finished" podID="f7b20493-5547-4a23-a8c4-411119736b50" containerID="2769b461654fa2b7718f0fd57c1436766474f0766264b2435b6bf4763b66cacd" exitCode=0 Jan 21 15:28:07 crc kubenswrapper[5021]: I0121 15:28:07.676121 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerDied","Data":"2769b461654fa2b7718f0fd57c1436766474f0766264b2435b6bf4763b66cacd"} Jan 21 15:28:07 crc kubenswrapper[5021]: I0121 15:28:07.681018 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerStarted","Data":"a09e091b45b08c8e60becdf125c47a531ebadc2512ab2130b9846ff59e212bf8"} Jan 21 15:28:07 crc kubenswrapper[5021]: I0121 15:28:07.683493 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerStarted","Data":"4580e7f8be83b39fe1a73fb80e38e5f1a77ffbade5b8b4ee153d73fa446eb34f"} Jan 21 15:28:07 crc kubenswrapper[5021]: I0121 15:28:07.724072 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-74dm9" podStartSLOduration=4.602518728 podStartE2EDuration="1m9.724046116s" podCreationTimestamp="2026-01-21 15:26:58 +0000 UTC" firstStartedPulling="2026-01-21 15:27:00.906606362 +0000 UTC m=+162.441720251" lastFinishedPulling="2026-01-21 15:28:06.02813375 +0000 UTC m=+227.563247639" observedRunningTime="2026-01-21 15:28:07.712531689 +0000 UTC m=+229.247645578" watchObservedRunningTime="2026-01-21 15:28:07.724046116 +0000 UTC m=+229.259160035" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.585949 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.586471 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.689676 5021 generic.go:334] "Generic (PLEG): container finished" podID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerID="d53c1ada960e0d0e2749a4b265b2c41409a31b6493cedca94c4bd351bfa37ef9" exitCode=0 Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.689761 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerDied","Data":"d53c1ada960e0d0e2749a4b265b2c41409a31b6493cedca94c4bd351bfa37ef9"} Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.692553 5021 generic.go:334] "Generic (PLEG): container finished" podID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerID="91f5c3828a7794c41c871d9d8e499047e10bcc601d85100f829729d98893c2ee" exitCode=0 Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.692656 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerDied","Data":"91f5c3828a7794c41c871d9d8e499047e10bcc601d85100f829729d98893c2ee"} Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.695406 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerStarted","Data":"024b6e1cbf95b0451e9d09bd9b6fa3e50a2527bcb9c929168807890b4daca109"} Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.723731 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-svghk" podStartSLOduration=4.656480673 podStartE2EDuration="1m10.723701032s" podCreationTimestamp="2026-01-21 15:26:58 +0000 UTC" firstStartedPulling="2026-01-21 15:27:00.994839121 +0000 UTC m=+162.529953010" lastFinishedPulling="2026-01-21 15:28:07.06205948 +0000 UTC m=+228.597173369" observedRunningTime="2026-01-21 15:28:07.748591308 +0000 UTC m=+229.283705217" watchObservedRunningTime="2026-01-21 15:28:08.723701032 +0000 UTC m=+230.258814931" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.765167 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g85c4" podStartSLOduration=1.9993817900000002 podStartE2EDuration="1m11.765144784s" podCreationTimestamp="2026-01-21 15:26:57 +0000 UTC" firstStartedPulling="2026-01-21 15:26:58.424122474 +0000 UTC m=+159.959236363" lastFinishedPulling="2026-01-21 15:28:08.189885468 +0000 UTC m=+229.724999357" observedRunningTime="2026-01-21 15:28:08.762566396 +0000 UTC m=+230.297680285" watchObservedRunningTime="2026-01-21 15:28:08.765144784 +0000 UTC m=+230.300258673" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.979609 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:28:08 crc kubenswrapper[5021]: I0121 15:28:08.979673 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-svghk" Jan 21 15:28:09 crc kubenswrapper[5021]: I0121 15:28:09.633376 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-74dm9" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="registry-server" probeResult="failure" output=< Jan 21 15:28:09 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 15:28:09 crc kubenswrapper[5021]: > Jan 21 15:28:09 crc kubenswrapper[5021]: I0121 15:28:09.703851 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerStarted","Data":"0ae1954ae4f1dd11e8fcce8f0d7266533f88c18ac6be4e6fa8cb9de1e85f3366"} Jan 21 15:28:09 crc kubenswrapper[5021]: I0121 15:28:09.721872 5021 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerStarted","Data":"d95eacb462ea8b501e59d681c0ef0cd51ffd449b8bfa4c2deb03fa896053bc58"} Jan 21 15:28:09 crc kubenswrapper[5021]: I0121 15:28:09.739598 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xjfsb" podStartSLOduration=3.43621952 podStartE2EDuration="1m12.739573007s" podCreationTimestamp="2026-01-21 15:26:57 +0000 UTC" firstStartedPulling="2026-01-21 15:26:59.824065349 +0000 UTC m=+161.359179248" lastFinishedPulling="2026-01-21 15:28:09.127418856 +0000 UTC m=+230.662532735" observedRunningTime="2026-01-21 15:28:09.728025128 +0000 UTC m=+231.263139037" watchObservedRunningTime="2026-01-21 15:28:09.739573007 +0000 UTC m=+231.274686896" Jan 21 15:28:09 crc kubenswrapper[5021]: I0121 15:28:09.756049 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-629pj" podStartSLOduration=4.040178453 podStartE2EDuration="1m14.756024973s" podCreationTimestamp="2026-01-21 15:26:55 +0000 UTC" firstStartedPulling="2026-01-21 15:26:58.349182942 +0000 UTC m=+159.884296831" lastFinishedPulling="2026-01-21 15:28:09.065029462 +0000 UTC m=+230.600143351" observedRunningTime="2026-01-21 15:28:09.753368043 +0000 UTC m=+231.288481942" watchObservedRunningTime="2026-01-21 15:28:09.756024973 +0000 UTC m=+231.291138862" Jan 21 15:28:10 crc kubenswrapper[5021]: I0121 15:28:10.043567 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-svghk" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="registry-server" probeResult="failure" output=< Jan 21 15:28:10 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 15:28:10 crc kubenswrapper[5021]: > Jan 21 15:28:10 crc kubenswrapper[5021]: I0121 15:28:10.729738 5021 generic.go:334] "Generic (PLEG): container finished" podID="af36dabe-9fde-4042-b317-6568e27fee70" containerID="0c560f35715802962e7390cbb1f41cd843130d34979d199364a1a7dc4fcbd700" exitCode=0 Jan 21 15:28:10 crc kubenswrapper[5021]: I0121 15:28:10.729784 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerDied","Data":"0c560f35715802962e7390cbb1f41cd843130d34979d199364a1a7dc4fcbd700"} Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.357239 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.357629 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.357684 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.358416 5021 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.358522 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b" gracePeriod=600 Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.753309 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b" exitCode=0 Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.753404 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b"} Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.753721 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04"} Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.757700 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerStarted","Data":"00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2"} Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.760629 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerStarted","Data":"4b324ee4cd8577ce10f68c2b331e64bd9feb40f12a6f53952b042bf8f0bcad35"} Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.797172 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-29tp8" podStartSLOduration=4.521372654 podStartE2EDuration="1m17.797154093s" podCreationTimestamp="2026-01-21 15:26:55 +0000 UTC" firstStartedPulling="2026-01-21 15:26:58.405040484 +0000 UTC m=+159.940154373" lastFinishedPulling="2026-01-21 15:28:11.680821923 +0000 UTC m=+233.215935812" observedRunningTime="2026-01-21 15:28:12.796770031 +0000 UTC m=+234.331883920" watchObservedRunningTime="2026-01-21 15:28:12.797154093 +0000 UTC m=+234.332267982" Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.824632 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ds2cq" podStartSLOduration=3.326539282 podStartE2EDuration="1m17.824611952s" podCreationTimestamp="2026-01-21 15:26:55 +0000 UTC" firstStartedPulling="2026-01-21 15:26:57.2458747 +0000 UTC m=+158.780988589" lastFinishedPulling="2026-01-21 15:28:11.74394737 +0000 UTC m=+233.279061259" observedRunningTime="2026-01-21 15:28:12.819832799 +0000 UTC m=+234.354946688" watchObservedRunningTime="2026-01-21 
Jan 21 15:28:12 crc kubenswrapper[5021]: I0121 15:28:12.824632 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ds2cq" podStartSLOduration=3.326539282 podStartE2EDuration="1m17.824611952s" podCreationTimestamp="2026-01-21 15:26:55 +0000 UTC" firstStartedPulling="2026-01-21 15:26:57.2458747 +0000 UTC m=+158.780988589" lastFinishedPulling="2026-01-21 15:28:11.74394737 +0000 UTC m=+233.279061259" observedRunningTime="2026-01-21 15:28:12.819832799 +0000 UTC m=+234.354946688" watchObservedRunningTime="2026-01-21 15:28:12.824611952 +0000 UTC m=+234.359725841"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.572220 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.572819 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.620775 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.812389 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.812799 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.852174 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.993413 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:28:15 crc kubenswrapper[5021]: I0121 15:28:15.993467 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:28:16 crc kubenswrapper[5021]: I0121 15:28:16.034432 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:28:16 crc kubenswrapper[5021]: I0121 15:28:16.822420 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:16 crc kubenswrapper[5021]: I0121 15:28:16.827650 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.585385 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-629pj"]
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.613526 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g85c4"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.613617 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g85c4"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.660000 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g85c4"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.834612 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g85c4"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.911734 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xjfsb"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.911842 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xjfsb"
Jan 21 15:28:17 crc kubenswrapper[5021]: I0121 15:28:17.959977 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xjfsb"
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.186928 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29tp8"]
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.624356 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-74dm9"
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.664729 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-74dm9"
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.791687 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-629pj" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="registry-server" containerID="cri-o://d95eacb462ea8b501e59d681c0ef0cd51ffd449b8bfa4c2deb03fa896053bc58" gracePeriod=2
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.792439 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-29tp8" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="registry-server" containerID="cri-o://4b324ee4cd8577ce10f68c2b331e64bd9feb40f12a6f53952b042bf8f0bcad35" gracePeriod=2
Jan 21 15:28:18 crc kubenswrapper[5021]: I0121 15:28:18.849693 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xjfsb"
Jan 21 15:28:19 crc kubenswrapper[5021]: I0121 15:28:19.030809 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-svghk"
Jan 21 15:28:19 crc kubenswrapper[5021]: I0121 15:28:19.092185 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-svghk"
Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.602556 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"]
Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.635438 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bw5fp"]
Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.801323 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xjfsb" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="registry-server" containerID="cri-o://0ae1954ae4f1dd11e8fcce8f0d7266533f88c18ac6be4e6fa8cb9de1e85f3366" gracePeriod=2
Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.962539 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"]
Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.962856 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ds2cq" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="registry-server" containerID="cri-o://00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2" gracePeriod=30
cmd=["grpc_health_probe","-addr=:50051"] Jan 21 15:28:20 crc kubenswrapper[5021]: E0121 15:28:20.971700 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2" cmd=["grpc_health_probe","-addr=:50051"] Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.977359 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6sz7l"] Jan 21 15:28:20 crc kubenswrapper[5021]: E0121 15:28:20.977446 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2" cmd=["grpc_health_probe","-addr=:50051"] Jan 21 15:28:20 crc kubenswrapper[5021]: E0121 15:28:20.977581 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/certified-operators-ds2cq" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="registry-server" Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.977688 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6sz7l" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="registry-server" containerID="cri-o://25209e10f6a6ebcd7ff92784ba20d40a2ab48f9b4460622c55d38d8d1527064b" gracePeriod=30 Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.988659 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"] Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.989739 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator" containerID="cri-o://587fa11fd5381b4c1315a7840db69e549090ef937cb1412e978f10377bff505f" gracePeriod=30 Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.997089 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"] Jan 21 15:28:20 crc kubenswrapper[5021]: I0121 15:28:20.997326 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g85c4" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="registry-server" containerID="cri-o://024b6e1cbf95b0451e9d09bd9b6fa3e50a2527bcb9c929168807890b4daca109" gracePeriod=30 Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.012434 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"] Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.012835 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-74dm9" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="registry-server" containerID="cri-o://4580e7f8be83b39fe1a73fb80e38e5f1a77ffbade5b8b4ee153d73fa446eb34f" gracePeriod=30 Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.015180 5021 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-w2pss"] Jan 21 15:28:21 crc kubenswrapper[5021]: E0121 15:28:21.015431 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" containerName="pruner" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.015449 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" containerName="pruner" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.015561 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e49c7f2-932a-44a7-950e-0e71ba1cfc1a" containerName="pruner" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.017426 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.020553 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.020764 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-svghk" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="registry-server" containerID="cri-o://a09e091b45b08c8e60becdf125c47a531ebadc2512ab2130b9846ff59e212bf8" gracePeriod=30 Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.031175 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-w2pss"] Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.079120 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg6nc\" (UniqueName: \"kubernetes.io/projected/3139b142-599c-4da3-9a0d-5facd5ca28cc-kube-api-access-rg6nc\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.079230 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.079258 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.180366 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.180417 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.180491 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg6nc\" (UniqueName: \"kubernetes.io/projected/3139b142-599c-4da3-9a0d-5facd5ca28cc-kube-api-access-rg6nc\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.183218 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.191611 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3139b142-599c-4da3-9a0d-5facd5ca28cc-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.198155 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg6nc\" (UniqueName: \"kubernetes.io/projected/3139b142-599c-4da3-9a0d-5facd5ca28cc-kube-api-access-rg6nc\") pod \"marketplace-operator-79b997595-w2pss\" (UID: \"3139b142-599c-4da3-9a0d-5facd5ca28cc\") " pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.335231 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.764802 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-w2pss"] Jan 21 15:28:21 crc kubenswrapper[5021]: I0121 15:28:21.808800 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" event={"ID":"3139b142-599c-4da3-9a0d-5facd5ca28cc","Type":"ContainerStarted","Data":"7b0607cd3bf8ec44d78366accbe52c2069cb6bfb6338790d4287fce6ae65226f"} Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.817162 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-29tp8_af36dabe-9fde-4042-b317-6568e27fee70/registry-server/0.log" Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.818321 5021 generic.go:334] "Generic (PLEG): container finished" podID="af36dabe-9fde-4042-b317-6568e27fee70" containerID="4b324ee4cd8577ce10f68c2b331e64bd9feb40f12a6f53952b042bf8f0bcad35" exitCode=137 Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.818399 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerDied","Data":"4b324ee4cd8577ce10f68c2b331e64bd9feb40f12a6f53952b042bf8f0bcad35"} Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.821254 5021 generic.go:334] "Generic (PLEG): container finished" podID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerID="0ae1954ae4f1dd11e8fcce8f0d7266533f88c18ac6be4e6fa8cb9de1e85f3366" exitCode=0 Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.821347 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerDied","Data":"0ae1954ae4f1dd11e8fcce8f0d7266533f88c18ac6be4e6fa8cb9de1e85f3366"} Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.823358 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-629pj_b2566bd0-1929-44cb-93b4-f09cc52bf852/registry-server/0.log" Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.824254 5021 generic.go:334] "Generic (PLEG): container finished" podID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerID="d95eacb462ea8b501e59d681c0ef0cd51ffd449b8bfa4c2deb03fa896053bc58" exitCode=137 Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.824290 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerDied","Data":"d95eacb462ea8b501e59d681c0ef0cd51ffd449b8bfa4c2deb03fa896053bc58"} Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.825829 5021 generic.go:334] "Generic (PLEG): container finished" podID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerID="587fa11fd5381b4c1315a7840db69e549090ef937cb1412e978f10377bff505f" exitCode=0 Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.825875 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" event={"ID":"c0332d4a-fb12-4d96-ae36-bb7295b28a87","Type":"ContainerDied","Data":"587fa11fd5381b4c1315a7840db69e549090ef937cb1412e978f10377bff505f"} Jan 21 15:28:22 crc kubenswrapper[5021]: I0121 15:28:22.986267 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.834000 5021 generic.go:334] "Generic (PLEG): container finished" podID="02187944-ab37-42f4-898b-eced0c5a1059" containerID="a09e091b45b08c8e60becdf125c47a531ebadc2512ab2130b9846ff59e212bf8" exitCode=0 Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.834067 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerDied","Data":"a09e091b45b08c8e60becdf125c47a531ebadc2512ab2130b9846ff59e212bf8"} Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.836363 5021 generic.go:334] "Generic (PLEG): container finished" podID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerID="00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2" exitCode=0 Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.836420 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerDied","Data":"00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2"} Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.839623 5021 generic.go:334] "Generic (PLEG): container finished" podID="85c51757-e7f5-487f-b873-a543118733b6" containerID="25209e10f6a6ebcd7ff92784ba20d40a2ab48f9b4460622c55d38d8d1527064b" exitCode=0 Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.839719 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerDied","Data":"25209e10f6a6ebcd7ff92784ba20d40a2ab48f9b4460622c55d38d8d1527064b"} Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.842232 5021 generic.go:334] "Generic (PLEG): container finished" podID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerID="4580e7f8be83b39fe1a73fb80e38e5f1a77ffbade5b8b4ee153d73fa446eb34f" exitCode=0 Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.842270 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerDied","Data":"4580e7f8be83b39fe1a73fb80e38e5f1a77ffbade5b8b4ee153d73fa446eb34f"} Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.847073 5021 generic.go:334] "Generic (PLEG): container finished" podID="f7b20493-5547-4a23-a8c4-411119736b50" containerID="024b6e1cbf95b0451e9d09bd9b6fa3e50a2527bcb9c929168807890b4daca109" exitCode=0 Jan 21 15:28:23 crc kubenswrapper[5021]: I0121 15:28:23.847115 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerDied","Data":"024b6e1cbf95b0451e9d09bd9b6fa3e50a2527bcb9c929168807890b4daca109"} Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.692643 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.830423 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ds2cq" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.837655 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics\") pod \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.838320 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca\") pod \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.838353 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities\") pod \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.838379 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvnp4\" (UniqueName: \"kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4\") pod \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\" (UID: \"c0332d4a-fb12-4d96-ae36-bb7295b28a87\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.839500 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content\") pod \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.839529 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqpp9\" (UniqueName: \"kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9\") pod \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\" (UID: \"c3b8b814-c3f4-4494-a9c4-f08dc74d895f\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.840520 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "c0332d4a-fb12-4d96-ae36-bb7295b28a87" (UID: "c0332d4a-fb12-4d96-ae36-bb7295b28a87"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.841706 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities" (OuterVolumeSpecName: "utilities") pod "c3b8b814-c3f4-4494-a9c4-f08dc74d895f" (UID: "c3b8b814-c3f4-4494-a9c4-f08dc74d895f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.841940 5021 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.841968 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.844053 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-629pj_b2566bd0-1929-44cb-93b4-f09cc52bf852/registry-server/0.log" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.844757 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-629pj" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.850752 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4" (OuterVolumeSpecName: "kube-api-access-zvnp4") pod "c0332d4a-fb12-4d96-ae36-bb7295b28a87" (UID: "c0332d4a-fb12-4d96-ae36-bb7295b28a87"). InnerVolumeSpecName "kube-api-access-zvnp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.864008 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9" (OuterVolumeSpecName: "kube-api-access-cqpp9") pod "c3b8b814-c3f4-4494-a9c4-f08dc74d895f" (UID: "c3b8b814-c3f4-4494-a9c4-f08dc74d895f"). InnerVolumeSpecName "kube-api-access-cqpp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.864093 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "c0332d4a-fb12-4d96-ae36-bb7295b28a87" (UID: "c0332d4a-fb12-4d96-ae36-bb7295b28a87"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.873245 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74dm9" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.874313 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74dm9" event={"ID":"e2e95c64-dd6b-4071-8cec-2832ccc612fe","Type":"ContainerDied","Data":"885e2bbd87c4240b85a684eb50733081c2f648681a81737c1939dd9d4dc4d3b9"} Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.874511 5021 scope.go:117] "RemoveContainer" containerID="4580e7f8be83b39fe1a73fb80e38e5f1a77ffbade5b8b4ee153d73fa446eb34f" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.877829 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.879728 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn" event={"ID":"c0332d4a-fb12-4d96-ae36-bb7295b28a87","Type":"ContainerDied","Data":"dfc90cf9fe0e951eb33f588aae6a30d193f048896a20f1fbb8cfa92c2eb2df3d"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.879797 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-m2brn"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.897726 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6sz7l" event={"ID":"85c51757-e7f5-487f-b873-a543118733b6","Type":"ContainerDied","Data":"c76c0ede83f8f3bc1867305fab23327c14d4623e500ddb7b78156582d1e28c26"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.897775 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c76c0ede83f8f3bc1867305fab23327c14d4623e500ddb7b78156582d1e28c26"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.897992 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.899019 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" event={"ID":"3139b142-599c-4da3-9a0d-5facd5ca28cc","Type":"ContainerStarted","Data":"da725c377414f9c1d8b79da6f788276e8707e8a39a962f725adddc543090e076"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.899892 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.903325 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-629pj_b2566bd0-1929-44cb-93b4-f09cc52bf852/registry-server/0.log"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.904024 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c3b8b814-c3f4-4494-a9c4-f08dc74d895f" (UID: "c3b8b814-c3f4-4494-a9c4-f08dc74d895f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.904242 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-629pj"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.904486 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-629pj" event={"ID":"b2566bd0-1929-44cb-93b4-f09cc52bf852","Type":"ContainerDied","Data":"01d512f63b37766e2432a5404c5d4339c207159d9fb400a6ba96abcc20e80ca5"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.915302 5021 scope.go:117] "RemoveContainer" containerID="35fcc96f3effb134c219b8e5a85910a1fc6fc3dfc044648f853220d3227668c9"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.915391 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.919129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-svghk" event={"ID":"02187944-ab37-42f4-898b-eced0c5a1059","Type":"ContainerDied","Data":"4eef0cfe17ff94f86b1eea1ff09a6a10420b6687fe32c1d85a805d71e8c43b2e"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.919256 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-svghk"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.934580 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds2cq" event={"ID":"c3b8b814-c3f4-4494-a9c4-f08dc74d895f","Type":"ContainerDied","Data":"dab72743b7fa95fb291fe1f9f42b74b4558e352fb1e665183b04d6348396a5d7"}
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.934647 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds2cq"
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.942973 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content\") pod \"02187944-ab37-42f4-898b-eced0c5a1059\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") "
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.943322 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities\") pod \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") "
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.943423 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpf9d\" (UniqueName: \"kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d\") pod \"85c51757-e7f5-487f-b873-a543118733b6\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") "
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.943540 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp9n2\" (UniqueName: \"kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2\") pod \"02187944-ab37-42f4-898b-eced0c5a1059\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") "
Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.943634 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content\") pod \"85c51757-e7f5-487f-b873-a543118733b6\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") "
\"85c51757-e7f5-487f-b873-a543118733b6\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.943739 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content\") pod \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944129 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bt8s2\" (UniqueName: \"kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2\") pod \"b2566bd0-1929-44cb-93b4-f09cc52bf852\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944292 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-utilities\") pod \"02187944-ab37-42f4-898b-eced0c5a1059\" (UID: \"02187944-ab37-42f4-898b-eced0c5a1059\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944396 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities\") pod \"b2566bd0-1929-44cb-93b4-f09cc52bf852\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944492 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsjjr\" (UniqueName: \"kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr\") pod \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\" (UID: \"e2e95c64-dd6b-4071-8cec-2832ccc612fe\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944621 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities\") pod \"85c51757-e7f5-487f-b873-a543118733b6\" (UID: \"85c51757-e7f5-487f-b873-a543118733b6\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.944821 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content\") pod \"b2566bd0-1929-44cb-93b4-f09cc52bf852\" (UID: \"b2566bd0-1929-44cb-93b4-f09cc52bf852\") " Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.945326 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.945457 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqpp9\" (UniqueName: \"kubernetes.io/projected/c3b8b814-c3f4-4494-a9c4-f08dc74d895f-kube-api-access-cqpp9\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.945557 5021 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c0332d4a-fb12-4d96-ae36-bb7295b28a87-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.945666 5021 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-zvnp4\" (UniqueName: \"kubernetes.io/projected/c0332d4a-fb12-4d96-ae36-bb7295b28a87-kube-api-access-zvnp4\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.946500 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities" (OuterVolumeSpecName: "utilities") pod "e2e95c64-dd6b-4071-8cec-2832ccc612fe" (UID: "e2e95c64-dd6b-4071-8cec-2832ccc612fe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.947178 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2" (OuterVolumeSpecName: "kube-api-access-gp9n2") pod "02187944-ab37-42f4-898b-eced0c5a1059" (UID: "02187944-ab37-42f4-898b-eced0c5a1059"). InnerVolumeSpecName "kube-api-access-gp9n2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.949415 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr" (OuterVolumeSpecName: "kube-api-access-gsjjr") pod "e2e95c64-dd6b-4071-8cec-2832ccc612fe" (UID: "e2e95c64-dd6b-4071-8cec-2832ccc612fe"). InnerVolumeSpecName "kube-api-access-gsjjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.950615 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-utilities" (OuterVolumeSpecName: "utilities") pod "02187944-ab37-42f4-898b-eced0c5a1059" (UID: "02187944-ab37-42f4-898b-eced0c5a1059"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.951686 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities" (OuterVolumeSpecName: "utilities") pod "85c51757-e7f5-487f-b873-a543118733b6" (UID: "85c51757-e7f5-487f-b873-a543118733b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.952842 5021 scope.go:117] "RemoveContainer" containerID="90dd51da92b6899797d9b59aa29c7f1b1b1477da349c5e6277599d062cf1d118" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.954460 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities" (OuterVolumeSpecName: "utilities") pod "b2566bd0-1929-44cb-93b4-f09cc52bf852" (UID: "b2566bd0-1929-44cb-93b4-f09cc52bf852"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.959199 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2" (OuterVolumeSpecName: "kube-api-access-bt8s2") pod "b2566bd0-1929-44cb-93b4-f09cc52bf852" (UID: "b2566bd0-1929-44cb-93b4-f09cc52bf852"). InnerVolumeSpecName "kube-api-access-bt8s2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.983555 5021 scope.go:117] "RemoveContainer" containerID="587fa11fd5381b4c1315a7840db69e549090ef937cb1412e978f10377bff505f" Jan 21 15:28:24 crc kubenswrapper[5021]: I0121 15:28:24.987885 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d" (OuterVolumeSpecName: "kube-api-access-vpf9d") pod "85c51757-e7f5-487f-b873-a543118733b6" (UID: "85c51757-e7f5-487f-b873-a543118733b6"). InnerVolumeSpecName "kube-api-access-vpf9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.011900 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-w2pss" podStartSLOduration=5.011876847 podStartE2EDuration="5.011876847s" podCreationTimestamp="2026-01-21 15:28:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:28:24.983866531 +0000 UTC m=+246.518980420" watchObservedRunningTime="2026-01-21 15:28:25.011876847 +0000 UTC m=+246.546990736" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.020647 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g85c4" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.021637 5021 scope.go:117] "RemoveContainer" containerID="d95eacb462ea8b501e59d681c0ef0cd51ffd449b8bfa4c2deb03fa896053bc58" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.041997 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"] Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049225 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content\") pod \"f7b20493-5547-4a23-a8c4-411119736b50\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049309 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities\") pod \"f7b20493-5547-4a23-a8c4-411119736b50\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049416 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrml2\" (UniqueName: \"kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2\") pod \"f7b20493-5547-4a23-a8c4-411119736b50\" (UID: \"f7b20493-5547-4a23-a8c4-411119736b50\") " Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049786 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp9n2\" (UniqueName: \"kubernetes.io/projected/02187944-ab37-42f4-898b-eced0c5a1059-kube-api-access-gp9n2\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049802 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bt8s2\" (UniqueName: \"kubernetes.io/projected/b2566bd0-1929-44cb-93b4-f09cc52bf852-kube-api-access-bt8s2\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049815 5021 
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049827 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsjjr\" (UniqueName: \"kubernetes.io/projected/e2e95c64-dd6b-4071-8cec-2832ccc612fe-kube-api-access-gsjjr\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049840 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049851 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049862 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.049874 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpf9d\" (UniqueName: \"kubernetes.io/projected/85c51757-e7f5-487f-b873-a543118733b6-kube-api-access-vpf9d\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.060285 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities" (OuterVolumeSpecName: "utilities") pod "f7b20493-5547-4a23-a8c4-411119736b50" (UID: "f7b20493-5547-4a23-a8c4-411119736b50"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.068053 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ds2cq"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.068136 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2" (OuterVolumeSpecName: "kube-api-access-qrml2") pod "f7b20493-5547-4a23-a8c4-411119736b50" (UID: "f7b20493-5547-4a23-a8c4-411119736b50"). InnerVolumeSpecName "kube-api-access-qrml2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.070197 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2566bd0-1929-44cb-93b4-f09cc52bf852" (UID: "b2566bd0-1929-44cb-93b4-f09cc52bf852"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.080627 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xjfsb"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.080662 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7b20493-5547-4a23-a8c4-411119736b50" (UID: "f7b20493-5547-4a23-a8c4-411119736b50"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.085458 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85c51757-e7f5-487f-b873-a543118733b6" (UID: "85c51757-e7f5-487f-b873-a543118733b6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.098617 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-29tp8_af36dabe-9fde-4042-b317-6568e27fee70/registry-server/0.log"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.103491 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.112737 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.116275 5021 scope.go:117] "RemoveContainer" containerID="d53c1ada960e0d0e2749a4b265b2c41409a31b6493cedca94c4bd351bfa37ef9"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.120153 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-m2brn"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.155731 5021 scope.go:117] "RemoveContainer" containerID="326dfcbf0f6f581ed121f94ab91cff85323b3f3018ec8a0094a19cd3221a6621"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.157841 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2566bd0-1929-44cb-93b4-f09cc52bf852-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.157935 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrml2\" (UniqueName: \"kubernetes.io/projected/f7b20493-5547-4a23-a8c4-411119736b50-kube-api-access-qrml2\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.157967 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.157983 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c51757-e7f5-487f-b873-a543118733b6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.158000 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b20493-5547-4a23-a8c4-411119736b50-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.188517 5021 scope.go:117] "RemoveContainer" containerID="a09e091b45b08c8e60becdf125c47a531ebadc2512ab2130b9846ff59e212bf8"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.204189 5021 scope.go:117] "RemoveContainer" containerID="b14a64342a899e361781fffb52baa1f918dc0ed5e15199e860bc597aaa53e9b0"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.220301 5021 scope.go:117] "RemoveContainer" containerID="601765261def215bd28bfaccff36ff0b7e1bcd81a4a9d09a0f29521a4ff1997b"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.220369 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2e95c64-dd6b-4071-8cec-2832ccc612fe" (UID: "e2e95c64-dd6b-4071-8cec-2832ccc612fe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.239440 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-629pj"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.244647 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-629pj"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.246342 5021 scope.go:117] "RemoveContainer" containerID="00a4badefd8aa5a4013a834ec5479a683b4d6d49d13ffebbba339227cb30a2e2"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259389 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2q9v\" (UniqueName: \"kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v\") pod \"6dacec5e-eca8-4362-82e1-95c571054d9d\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259513 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content\") pod \"af36dabe-9fde-4042-b317-6568e27fee70\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259556 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content\") pod \"6dacec5e-eca8-4362-82e1-95c571054d9d\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259578 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities\") pod \"af36dabe-9fde-4042-b317-6568e27fee70\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259604 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fv27\" (UniqueName: \"kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27\") pod \"af36dabe-9fde-4042-b317-6568e27fee70\" (UID: \"af36dabe-9fde-4042-b317-6568e27fee70\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.259656 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities\") pod \"6dacec5e-eca8-4362-82e1-95c571054d9d\" (UID: \"6dacec5e-eca8-4362-82e1-95c571054d9d\") "
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.260823 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities" (OuterVolumeSpecName: "utilities") pod "af36dabe-9fde-4042-b317-6568e27fee70" (UID: "af36dabe-9fde-4042-b317-6568e27fee70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.260852 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities" (OuterVolumeSpecName: "utilities") pod "6dacec5e-eca8-4362-82e1-95c571054d9d" (UID: "6dacec5e-eca8-4362-82e1-95c571054d9d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.260947 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2e95c64-dd6b-4071-8cec-2832ccc612fe-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.260994 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.261009 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.262340 5021 scope.go:117] "RemoveContainer" containerID="3580652171defab3c78557cec3b6cd1e23bf72d49dff3a474eb5ac956dde65b3"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.263240 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27" (OuterVolumeSpecName: "kube-api-access-8fv27") pod "af36dabe-9fde-4042-b317-6568e27fee70" (UID: "af36dabe-9fde-4042-b317-6568e27fee70"). InnerVolumeSpecName "kube-api-access-8fv27". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.264076 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v" (OuterVolumeSpecName: "kube-api-access-p2q9v") pod "6dacec5e-eca8-4362-82e1-95c571054d9d" (UID: "6dacec5e-eca8-4362-82e1-95c571054d9d"). InnerVolumeSpecName "kube-api-access-p2q9v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.271640 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02187944-ab37-42f4-898b-eced0c5a1059" (UID: "02187944-ab37-42f4-898b-eced0c5a1059"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.281221 5021 scope.go:117] "RemoveContainer" containerID="7d36ec51cd1407d0e2c6d6ab72c7a870318fc7ed77eecb119724bdd8a994eaf2" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.287757 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6dacec5e-eca8-4362-82e1-95c571054d9d" (UID: "6dacec5e-eca8-4362-82e1-95c571054d9d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.324039 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af36dabe-9fde-4042-b317-6568e27fee70" (UID: "af36dabe-9fde-4042-b317-6568e27fee70"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.361710 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2q9v\" (UniqueName: \"kubernetes.io/projected/6dacec5e-eca8-4362-82e1-95c571054d9d-kube-api-access-p2q9v\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.361795 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02187944-ab37-42f4-898b-eced0c5a1059-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.361808 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af36dabe-9fde-4042-b317-6568e27fee70-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.361818 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dacec5e-eca8-4362-82e1-95c571054d9d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.361830 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fv27\" (UniqueName: \"kubernetes.io/projected/af36dabe-9fde-4042-b317-6568e27fee70-kube-api-access-8fv27\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.560772 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.566148 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-svghk"] Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.942742 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xjfsb" event={"ID":"6dacec5e-eca8-4362-82e1-95c571054d9d","Type":"ContainerDied","Data":"5884b0f7cb4155b9bfd89d647dedf620f00271a9f9254a030fc28b391289f87f"} Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.942831 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.943727 5021 scope.go:117] "RemoveContainer" containerID="0ae1954ae4f1dd11e8fcce8f0d7266533f88c18ac6be4e6fa8cb9de1e85f3366"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.945177 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g85c4" event={"ID":"f7b20493-5547-4a23-a8c4-411119736b50","Type":"ContainerDied","Data":"c2194e882d6879c2fdf51a7ea600619872a58190c95eecc7cc4b2ae0aaa6e5ea"}
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.945210 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g85c4"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.963598 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74dm9"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.971204 5021 scope.go:117] "RemoveContainer" containerID="91f5c3828a7794c41c871d9d8e499047e10bcc601d85100f829729d98893c2ee"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.977494 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-29tp8_af36dabe-9fde-4042-b317-6568e27fee70/registry-server/0.log"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.994746 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"]
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.995748 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29tp8" event={"ID":"af36dabe-9fde-4042-b317-6568e27fee70","Type":"ContainerDied","Data":"d43a3db41dd53a96e2b70b862c39ca12ce7e9c12ef7a69cdb8cdc2d112fcdd0e"}
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.995779 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6sz7l"
Jan 21 15:28:25 crc kubenswrapper[5021]: I0121 15:28:25.998177 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29tp8"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.009124 5021 scope.go:117] "RemoveContainer" containerID="8b011be0440def67096984fb81ba6d2bddd3489480ed644485973b2acf0620b3"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.017472 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g85c4"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.036611 5021 scope.go:117] "RemoveContainer" containerID="024b6e1cbf95b0451e9d09bd9b6fa3e50a2527bcb9c929168807890b4daca109"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.037659 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.044716 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xjfsb"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.056066 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.068814 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-74dm9"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.078856 5021 scope.go:117] "RemoveContainer" containerID="2769b461654fa2b7718f0fd57c1436766474f0766264b2435b6bf4763b66cacd"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.081079 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29tp8"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.088435 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-29tp8"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.104541 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6sz7l"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.104979 5021 scope.go:117] "RemoveContainer" containerID="ae92eb1c218afa919addafa83cd99eeca12614a69420a29f130b3a3337127886"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.110135 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6sz7l"]
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.121238 5021 scope.go:117] "RemoveContainer" containerID="4b324ee4cd8577ce10f68c2b331e64bd9feb40f12a6f53952b042bf8f0bcad35"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.136521 5021 scope.go:117] "RemoveContainer" containerID="0c560f35715802962e7390cbb1f41cd843130d34979d199364a1a7dc4fcbd700"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.152285 5021 scope.go:117] "RemoveContainer" containerID="f8f4ae4a2e0af3c461b2d795527c54b4a7bfd7d0450c0d9f211bf9a3fb0d98f7"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.748592 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02187944-ab37-42f4-898b-eced0c5a1059" path="/var/lib/kubelet/pods/02187944-ab37-42f4-898b-eced0c5a1059/volumes"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.749773 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" path="/var/lib/kubelet/pods/6dacec5e-eca8-4362-82e1-95c571054d9d/volumes"
Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.750627 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85c51757-e7f5-487f-b873-a543118733b6" path="/var/lib/kubelet/pods/85c51757-e7f5-487f-b873-a543118733b6/volumes"
podUID="85c51757-e7f5-487f-b873-a543118733b6" path="/var/lib/kubelet/pods/85c51757-e7f5-487f-b873-a543118733b6/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.751954 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af36dabe-9fde-4042-b317-6568e27fee70" path="/var/lib/kubelet/pods/af36dabe-9fde-4042-b317-6568e27fee70/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.752659 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" path="/var/lib/kubelet/pods/b2566bd0-1929-44cb-93b4-f09cc52bf852/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.754003 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" path="/var/lib/kubelet/pods/c0332d4a-fb12-4d96-ae36-bb7295b28a87/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.754629 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" path="/var/lib/kubelet/pods/c3b8b814-c3f4-4494-a9c4-f08dc74d895f/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.755384 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" path="/var/lib/kubelet/pods/e2e95c64-dd6b-4071-8cec-2832ccc612fe/volumes" Jan 21 15:28:26 crc kubenswrapper[5021]: I0121 15:28:26.756694 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b20493-5547-4a23-a8c4-411119736b50" path="/var/lib/kubelet/pods/f7b20493-5547-4a23-a8c4-411119736b50/volumes" Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199387 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zxtgj"] Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199874 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199892 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199926 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="extract-content" Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199936 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="extract-content" Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199948 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199955 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199968 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199977 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="registry-server" Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199984 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b20493-5547-4a23-a8c4-411119736b50" 
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.199991 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.199999 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200029 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200039 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200049 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200062 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200071 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200081 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200089 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200101 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200110 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200119 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200126 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200137 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200145 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200153 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200160 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200170 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200179 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200191 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200199 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200211 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200218 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200227 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200236 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200245 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200254 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200262 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200270 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200282 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200289 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200299 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200306 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200316 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200324 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="extract-content"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200334 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200342 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200351 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200359 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: E0121 15:28:27.200367 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200375 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="extract-utilities"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200485 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b20493-5547-4a23-a8c4-411119736b50" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200631 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="af36dabe-9fde-4042-b317-6568e27fee70" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200645 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="02187944-ab37-42f4-898b-eced0c5a1059" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200657 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dacec5e-eca8-4362-82e1-95c571054d9d" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200669 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2e95c64-dd6b-4071-8cec-2832ccc612fe" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200679 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="85c51757-e7f5-487f-b873-a543118733b6" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200689 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2566bd0-1929-44cb-93b4-f09cc52bf852" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200702 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0332d4a-fb12-4d96-ae36-bb7295b28a87" containerName="marketplace-operator"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.200710 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3b8b814-c3f4-4494-a9c4-f08dc74d895f" containerName="registry-server"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.201627 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.205364 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.212621 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zxtgj"]
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.306018 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-catalog-content\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.306081 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-utilities\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.306113 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fbbm\" (UniqueName: \"kubernetes.io/projected/f987e070-06b8-47d4-bc4a-441649d5d9e9-kube-api-access-5fbbm\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.409780 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-catalog-content\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.410156 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-utilities\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.410254 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fbbm\" (UniqueName: \"kubernetes.io/projected/f987e070-06b8-47d4-bc4a-441649d5d9e9-kube-api-access-5fbbm\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.411133 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-catalog-content\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.411612 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f987e070-06b8-47d4-bc4a-441649d5d9e9-utilities\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.439145 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fbbm\" (UniqueName: \"kubernetes.io/projected/f987e070-06b8-47d4-bc4a-441649d5d9e9-kube-api-access-5fbbm\") pod \"community-operators-zxtgj\" (UID: \"f987e070-06b8-47d4-bc4a-441649d5d9e9\") " pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.523211 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.711069 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zxtgj"]
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.791932 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xgz8b"]
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.793043 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.796360 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.807652 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgz8b"]
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.917162 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-utilities\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.917759 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtgws\" (UniqueName: \"kubernetes.io/projected/b45643f8-e7b2-4c4f-8da1-b4e753886a05-kube-api-access-rtgws\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:27 crc kubenswrapper[5021]: I0121 15:28:27.917833 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-catalog-content\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.013165 5021 generic.go:334] "Generic (PLEG): container finished" podID="f987e070-06b8-47d4-bc4a-441649d5d9e9" containerID="59e08f9932c629b444021184bf4752760852993e87e2cf812b1baeb6672f5d9e" exitCode=0
Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.013244 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerDied","Data":"59e08f9932c629b444021184bf4752760852993e87e2cf812b1baeb6672f5d9e"}
Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.013297 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerStarted","Data":"433ba8185d52352c059c7cf3a727902e5747f931169eb0e26f062e4520c46554"}
pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerStarted","Data":"433ba8185d52352c059c7cf3a727902e5747f931169eb0e26f062e4520c46554"} Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.018677 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtgws\" (UniqueName: \"kubernetes.io/projected/b45643f8-e7b2-4c4f-8da1-b4e753886a05-kube-api-access-rtgws\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.018733 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-catalog-content\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.018772 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-utilities\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.019244 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-utilities\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.020041 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b45643f8-e7b2-4c4f-8da1-b4e753886a05-catalog-content\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.043572 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtgws\" (UniqueName: \"kubernetes.io/projected/b45643f8-e7b2-4c4f-8da1-b4e753886a05-kube-api-access-rtgws\") pod \"redhat-operators-xgz8b\" (UID: \"b45643f8-e7b2-4c4f-8da1-b4e753886a05\") " pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.114255 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:28 crc kubenswrapper[5021]: I0121 15:28:28.537511 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgz8b"] Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.021483 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerStarted","Data":"4c9f152a5910eed481662dcf98308d1c00a32f8fc03c16840b27cd47fb23d0fd"} Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.023294 5021 generic.go:334] "Generic (PLEG): container finished" podID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" containerID="aa7cc5722a2df6f9a2371db1320fc16d4deb2c86cdcb0c95c2461094504e4ae4" exitCode=0 Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.023387 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgz8b" event={"ID":"b45643f8-e7b2-4c4f-8da1-b4e753886a05","Type":"ContainerDied","Data":"aa7cc5722a2df6f9a2371db1320fc16d4deb2c86cdcb0c95c2461094504e4ae4"} Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.023500 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgz8b" event={"ID":"b45643f8-e7b2-4c4f-8da1-b4e753886a05","Type":"ContainerStarted","Data":"6a512474f74f74fedbef42f9b03855b7eb45c9595651b2080ea4c62d268ab402"} Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.594887 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vl5k6"] Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.596363 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.598308 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.607616 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vl5k6"] Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.739302 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-catalog-content\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.739385 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-utilities\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.739594 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n64bb\" (UniqueName: \"kubernetes.io/projected/9520cbac-26ed-42dc-a4f7-b2cbd670e722-kube-api-access-n64bb\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.841324 5021 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-catalog-content\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.841389 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-utilities\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.841428 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n64bb\" (UniqueName: \"kubernetes.io/projected/9520cbac-26ed-42dc-a4f7-b2cbd670e722-kube-api-access-n64bb\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.841945 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-catalog-content\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.842030 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9520cbac-26ed-42dc-a4f7-b2cbd670e722-utilities\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.862148 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n64bb\" (UniqueName: \"kubernetes.io/projected/9520cbac-26ed-42dc-a4f7-b2cbd670e722-kube-api-access-n64bb\") pod \"certified-operators-vl5k6\" (UID: \"9520cbac-26ed-42dc-a4f7-b2cbd670e722\") " pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:29 crc kubenswrapper[5021]: I0121 15:28:29.916499 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.030644 5021 generic.go:334] "Generic (PLEG): container finished" podID="f987e070-06b8-47d4-bc4a-441649d5d9e9" containerID="4c9f152a5910eed481662dcf98308d1c00a32f8fc03c16840b27cd47fb23d0fd" exitCode=0 Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.030697 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerDied","Data":"4c9f152a5910eed481662dcf98308d1c00a32f8fc03c16840b27cd47fb23d0fd"} Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.202055 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qbdg4"] Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.203844 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.206180 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.208712 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qbdg4"] Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.327190 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vl5k6"] Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.349329 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-catalog-content\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.349409 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-utilities\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.349475 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr6w8\" (UniqueName: \"kubernetes.io/projected/da685ff8-e5ae-4316-914d-690f55d41325-kube-api-access-rr6w8\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.450966 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-utilities\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.451086 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr6w8\" (UniqueName: \"kubernetes.io/projected/da685ff8-e5ae-4316-914d-690f55d41325-kube-api-access-rr6w8\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.451126 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-catalog-content\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.451522 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-utilities\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.451643 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da685ff8-e5ae-4316-914d-690f55d41325-catalog-content\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.474943 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr6w8\" (UniqueName: \"kubernetes.io/projected/da685ff8-e5ae-4316-914d-690f55d41325-kube-api-access-rr6w8\") pod \"redhat-marketplace-qbdg4\" (UID: \"da685ff8-e5ae-4316-914d-690f55d41325\") " pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.539366 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:30 crc kubenswrapper[5021]: I0121 15:28:30.758146 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qbdg4"] Jan 21 15:28:30 crc kubenswrapper[5021]: W0121 15:28:30.761185 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda685ff8_e5ae_4316_914d_690f55d41325.slice/crio-80a83392e7929584d7874a40002b043d2a7d42f8fe5bda5a7124141a58c74f06 WatchSource:0}: Error finding container 80a83392e7929584d7874a40002b043d2a7d42f8fe5bda5a7124141a58c74f06: Status 404 returned error can't find the container with id 80a83392e7929584d7874a40002b043d2a7d42f8fe5bda5a7124141a58c74f06 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.040695 5021 generic.go:334] "Generic (PLEG): container finished" podID="da685ff8-e5ae-4316-914d-690f55d41325" containerID="063cbe3a65a90db98cdf0797770015c414e4812e1e07ffca9fe9be8bb4644e01" exitCode=0 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.040781 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qbdg4" event={"ID":"da685ff8-e5ae-4316-914d-690f55d41325","Type":"ContainerDied","Data":"063cbe3a65a90db98cdf0797770015c414e4812e1e07ffca9fe9be8bb4644e01"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.040820 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qbdg4" event={"ID":"da685ff8-e5ae-4316-914d-690f55d41325","Type":"ContainerStarted","Data":"80a83392e7929584d7874a40002b043d2a7d42f8fe5bda5a7124141a58c74f06"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.048897 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zxtgj" event={"ID":"f987e070-06b8-47d4-bc4a-441649d5d9e9","Type":"ContainerStarted","Data":"bd4db441991b7cca1106b89502b19325bbb6f5da5a4b52641323f07ad941716b"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.052293 5021 generic.go:334] "Generic (PLEG): container finished" podID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" containerID="40ac8a11a7adf0620cd3206175f520859d54a3cc4694d38fc21e9957dc37404b" exitCode=0 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.052374 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vl5k6" event={"ID":"9520cbac-26ed-42dc-a4f7-b2cbd670e722","Type":"ContainerDied","Data":"40ac8a11a7adf0620cd3206175f520859d54a3cc4694d38fc21e9957dc37404b"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.052411 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vl5k6" 
event={"ID":"9520cbac-26ed-42dc-a4f7-b2cbd670e722","Type":"ContainerStarted","Data":"84e40720c770c432ab16c800b5559d8524df3499459549317f1a835631f43ffe"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.055318 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgz8b" event={"ID":"b45643f8-e7b2-4c4f-8da1-b4e753886a05","Type":"ContainerStarted","Data":"7d9c1c90155b80252f5481c73587f8d5db1becbe61d209bacf3d6e85443aa912"} Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.118387 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zxtgj" podStartSLOduration=1.6980621409999999 podStartE2EDuration="4.118360889s" podCreationTimestamp="2026-01-21 15:28:27 +0000 UTC" firstStartedPulling="2026-01-21 15:28:28.015216015 +0000 UTC m=+249.550329914" lastFinishedPulling="2026-01-21 15:28:30.435514773 +0000 UTC m=+251.970628662" observedRunningTime="2026-01-21 15:28:31.116682907 +0000 UTC m=+252.651796796" watchObservedRunningTime="2026-01-21 15:28:31.118360889 +0000 UTC m=+252.653474778" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.658635 5021 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.659437 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535" gracePeriod=15 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.659461 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d" gracePeriod=15 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.659445 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f" gracePeriod=15 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.659706 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1" gracePeriod=15 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661543 5021 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661861 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661884 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661897 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-cert-regeneration-controller" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661926 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661937 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661948 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661963 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661969 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661979 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.661985 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.661994 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662000 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 15:28:31 crc kubenswrapper[5021]: E0121 15:28:31.662011 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662018 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662117 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662127 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662136 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662144 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662153 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.662160 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.659567 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56" gracePeriod=15 Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.664119 5021 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.665191 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.671054 5021 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768198 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768253 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768283 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768312 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768342 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768370 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768410 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.768436 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869137 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869222 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869257 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869323 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869342 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869357 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869378 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869384 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869438 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869471 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869429 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869504 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869536 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869547 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869401 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:31 crc kubenswrapper[5021]: I0121 15:28:31.869403 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:32 crc kubenswrapper[5021]: I0121 15:28:32.063173 5021 
generic.go:334] "Generic (PLEG): container finished" podID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" containerID="7d9c1c90155b80252f5481c73587f8d5db1becbe61d209bacf3d6e85443aa912" exitCode=0 Jan 21 15:28:32 crc kubenswrapper[5021]: I0121 15:28:32.063243 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgz8b" event={"ID":"b45643f8-e7b2-4c4f-8da1-b4e753886a05","Type":"ContainerDied","Data":"7d9c1c90155b80252f5481c73587f8d5db1becbe61d209bacf3d6e85443aa912"} Jan 21 15:28:32 crc kubenswrapper[5021]: I0121 15:28:32.066045 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:32 crc kubenswrapper[5021]: E0121 15:28:32.066680 5021 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-xgz8b.188cc898c20d5d60 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-xgz8b,UID:b45643f8-e7b2-4c4f-8da1-b4e753886a05,APIVersion:v1,ResourceVersion:29602,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,LastTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.072183 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgz8b" event={"ID":"b45643f8-e7b2-4c4f-8da1-b4e753886a05","Type":"ContainerStarted","Data":"f1298cde1b5a7bccf7aa0dcbc11445147a1ad6c0f7b17acb2ea0cafbe45ad423"} Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.073079 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.075119 5021 generic.go:334] "Generic (PLEG): container finished" podID="da685ff8-e5ae-4316-914d-690f55d41325" containerID="1edfa83c2c7d95d2bd32ad58f34a84355679a4f954edd11b812f4259fe997a15" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.075192 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qbdg4" event={"ID":"da685ff8-e5ae-4316-914d-690f55d41325","Type":"ContainerDied","Data":"1edfa83c2c7d95d2bd32ad58f34a84355679a4f954edd11b812f4259fe997a15"} Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.076826 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.077329 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.080292 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.083459 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.084535 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.084584 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.084596 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.084607 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56" exitCode=2 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.084672 5021 scope.go:117] "RemoveContainer" containerID="6064978522c6ddd526c20221a2d4a73961beaea35b31eab9598087b1af017753" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.087419 5021 generic.go:334] "Generic (PLEG): container finished" podID="2a9fdb38-edc6-4315-a91c-9d8489695e24" containerID="5ab8ef4a9e4538f88456201fe6ea5cb38a6766ab02bb01eb73e844216d336304" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.087503 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a9fdb38-edc6-4315-a91c-9d8489695e24","Type":"ContainerDied","Data":"5ab8ef4a9e4538f88456201fe6ea5cb38a6766ab02bb01eb73e844216d336304"} Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089193 5021 generic.go:334] "Generic (PLEG): container finished" podID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" containerID="7eb45704db343c81b4dc71b0f0ac5a4bc16524a74b6c5431447e88cea7c2021d" exitCode=0 Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089220 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vl5k6" event={"ID":"9520cbac-26ed-42dc-a4f7-b2cbd670e722","Type":"ContainerDied","Data":"7eb45704db343c81b4dc71b0f0ac5a4bc16524a74b6c5431447e88cea7c2021d"} Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089262 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" 
pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089465 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089657 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.089884 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.090127 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.090273 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:33 crc kubenswrapper[5021]: I0121 15:28:33.090418 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.052063 5021 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.053120 5021 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.053614 5021 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.054053 5021 controller.go:195] "Failed 
to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.054347 5021 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.054399 5021 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.054684 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="200ms" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.097243 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vl5k6" event={"ID":"9520cbac-26ed-42dc-a4f7-b2cbd670e722","Type":"ContainerStarted","Data":"e2f3f18fa2fa79a71bd724ec942ce5e711485e83b6ba600f29d6b872811cbddb"} Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.098138 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.098532 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.098710 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.099097 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.100876 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qbdg4" event={"ID":"da685ff8-e5ae-4316-914d-690f55d41325","Type":"ContainerStarted","Data":"acb1ddc800cbee9fff93df33d03265d4f417f100c6a669c66adea2ca1e5eefcc"} Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.101705 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.102128 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.102575 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.102813 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.104481 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.105299 5021 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535" exitCode=0 Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.122899 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.123652 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.124232 5021 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.124403 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.124661 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.125035 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.125190 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.255575 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="400ms" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.315430 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.315861 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.315933 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316008 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316067 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316164 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316306 5021 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316324 5021 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.316336 5021 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:34 crc kubenswrapper[5021]: E0121 15:28:34.656048 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="800ms" Jan 21 15:28:34 crc kubenswrapper[5021]: I0121 15:28:34.748461 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.114702 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"2a9fdb38-edc6-4315-a91c-9d8489695e24","Type":"ContainerDied","Data":"f8b209fe15db0a8e419900b6e2120eacf73824b72b3fdfcb0e9aedf95dcfe285"} Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.114759 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8b209fe15db0a8e419900b6e2120eacf73824b72b3fdfcb0e9aedf95dcfe285" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.118353 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.119616 5021 scope.go:117] "RemoveContainer" containerID="39f53539825ef70fcde1b61f8e650f22c059fd674523bfe1882b9844923ce9c1" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.119872 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.120893 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.121229 5021 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.121522 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.121763 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.122573 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: E0121 15:28:35.456755 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="1.6s" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.585597 5021 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.586484 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.586494 5021 scope.go:117] "RemoveContainer" containerID="06cf2acfbc236dc530bb69dff9e6e83755c426daba101b6b6abe9c23684d2b7d" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.586878 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.587116 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.587288 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.587439 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.587779 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.588284 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.588610 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.588825 5021 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.589046 5021 status_manager.go:851] "Failed to get status for pod" 
podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.601964 5021 scope.go:117] "RemoveContainer" containerID="10f4a9dbab7b11fa3eaaae6060df484ff7e212710188c29acc1834f07310d49f" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.615333 5021 scope.go:117] "RemoveContainer" containerID="cbcf60368c95e2747fd275984fc6cfc714ca37c93d92edc446643a41c4022c56" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.627547 5021 scope.go:117] "RemoveContainer" containerID="bea603621cfb30c2b0a1dc2021ea515cb87cd590232f1c4696341f7eca1d4535" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.644073 5021 scope.go:117] "RemoveContainer" containerID="d364e35fde4ad55052ad651b55279f24a55401b87de16eff36e5890a75a90cc9" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.736312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock\") pod \"2a9fdb38-edc6-4315-a91c-9d8489695e24\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.736763 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir\") pod \"2a9fdb38-edc6-4315-a91c-9d8489695e24\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.736613 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock" (OuterVolumeSpecName: "var-lock") pod "2a9fdb38-edc6-4315-a91c-9d8489695e24" (UID: "2a9fdb38-edc6-4315-a91c-9d8489695e24"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.736826 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access\") pod \"2a9fdb38-edc6-4315-a91c-9d8489695e24\" (UID: \"2a9fdb38-edc6-4315-a91c-9d8489695e24\") " Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.736860 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2a9fdb38-edc6-4315-a91c-9d8489695e24" (UID: "2a9fdb38-edc6-4315-a91c-9d8489695e24"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.737038 5021 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.737062 5021 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/2a9fdb38-edc6-4315-a91c-9d8489695e24-var-lock\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.747550 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2a9fdb38-edc6-4315-a91c-9d8489695e24" (UID: "2a9fdb38-edc6-4315-a91c-9d8489695e24"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:28:35 crc kubenswrapper[5021]: I0121 15:28:35.837612 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2a9fdb38-edc6-4315-a91c-9d8489695e24-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 21 15:28:36 crc kubenswrapper[5021]: E0121 15:28:36.003074 5021 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-xgz8b.188cc898c20d5d60 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-xgz8b,UID:b45643f8-e7b2-4c4f-8da1-b4e753886a05,APIVersion:v1,ResourceVersion:29602,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,LastTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.126776 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.153517 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.153732 5021 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.154055 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.154287 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.154498 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:36 crc kubenswrapper[5021]: E0121 15:28:36.697450 5021 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 21 15:28:36 crc kubenswrapper[5021]: I0121 15:28:36.698708 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
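[Note] The two entries above capture the static-pod/mirror-pod split that lets openshift-kube-apiserver pods run while the API server itself is unreachable: the kubelet starts the pod from its on-disk manifest regardless, and the POST that would publish a mirror pod to the API is best-effort and retried on a later sync. A minimal sketch of that pattern, with hypothetical stand-in types and helpers rather than kubelet's actual APIs:

package main

import (
	"errors"
	"fmt"
)

type staticPod struct{ namespace, name string }

// createMirrorPod stands in for the POST to /api/v1/namespaces/<ns>/pods
// that fails with "connection refused" in the entries above.
func createMirrorPod(p staticPod) error {
	return errors.New("connect: connection refused")
}

func syncStaticPod(p staticPod) {
	// 1. Start (or keep running) the containers from the local manifest;
	//    this step does not depend on the API server at all.
	fmt.Printf("running static pod %s/%s from disk manifest\n", p.namespace, p.name)

	// 2. Best-effort: publish a mirror pod so the control plane can see it.
	//    Failure is only logged and retried on the next sync, never fatal.
	if err := createMirrorPod(p); err != nil {
		fmt.Printf("failed creating a mirror pod: %v (will retry)\n", err)
	}
}

func main() {
	syncStaticPod(staticPod{"openshift-kube-apiserver", "kube-apiserver-startup-monitor-crc"})
}

Consistent with step 1, the ContainerStarted event at 15:28:37.135244 just below shows the static pod making progress even though the mirror-pod POST keeps failing.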
Jan 21 15:28:37 crc kubenswrapper[5021]: E0121 15:28:37.057939 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="3.2s"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.135244 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"e9f4273a34244d5c47a772b1b782aa00875aaa154d08e3180f8c2579557a980b"}
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.524296 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.525233 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.573391 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zxtgj"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.573893 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.574235 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.574823 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.575256 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:37 crc kubenswrapper[5021]: I0121 15:28:37.575538 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.114675 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.115131 5021 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xgz8b" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.198171 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zxtgj" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.199833 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.200540 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.201300 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.201811 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.202140 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.741150 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.743091 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.743752 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.744088 5021 status_manager.go:851] 
"Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:38 crc kubenswrapper[5021]: I0121 15:28:38.744502 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.148697 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964"} Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.149548 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: E0121 15:28:39.149570 5021 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.150025 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.150251 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.150409 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.150666 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.155187 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xgz8b" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" 
containerName="registry-server" probeResult="failure" output=< Jan 21 15:28:39 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 15:28:39 crc kubenswrapper[5021]: > Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.917691 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.917758 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.966481 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.967170 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.967707 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.968166 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.968680 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:39 crc kubenswrapper[5021]: I0121 15:28:39.969112 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: E0121 15:28:40.167527 5021 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.206897 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vl5k6" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.207568 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.208257 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.208713 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.209266 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.209685 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: E0121 15:28:40.259264 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="6.4s" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.539730 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.539802 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.580885 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.581697 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.583347 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.583945 5021 status_manager.go:851] "Failed to get status for pod" 
podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.584433 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:40 crc kubenswrapper[5021]: I0121 15:28:40.585699 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.214703 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qbdg4" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.215481 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.215842 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.216201 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.216553 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:41 crc kubenswrapper[5021]: I0121 15:28:41.216918 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:43 crc kubenswrapper[5021]: E0121 15:28:43.756001 5021 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" volumeName="registry-storage" Jan 21 15:28:45 crc kubenswrapper[5021]: I0121 15:28:45.677636 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" containerID="cri-o://f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544" gracePeriod=15 Jan 21 15:28:46 crc kubenswrapper[5021]: E0121 15:28:46.004750 5021 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-operators-xgz8b.188cc898c20d5d60 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-operators-xgz8b,UID:b45643f8-e7b2-4c4f-8da1-b4e753886a05,APIVersion:v1,ResourceVersion:29602,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,LastTimestamp:2026-01-21 15:28:32.066002272 +0000 UTC m=+253.601116161,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 21 15:28:46 crc kubenswrapper[5021]: E0121 15:28:46.660623 5021 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="7s" Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.737548 5021 util.go:30] "No sandbox for pod can be found. 
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.737548 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.739041 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.739416 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.739818 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.740186 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.740416 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.752673 5021 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.752695 5021 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:46 crc kubenswrapper[5021]: E0121 15:28:46.753007 5021 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:46 crc kubenswrapper[5021]: I0121 15:28:46.753369 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 21 15:28:46 crc kubenswrapper[5021]: W0121 15:28:46.777833 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-f73f7134852205f81028935a12c15cdb9b893d70d2c3f5025ce61c500928e129 WatchSource:0}: Error finding container f73f7134852205f81028935a12c15cdb9b893d70d2c3f5025ce61c500928e129: Status 404 returned error can't find the container with id f73f7134852205f81028935a12c15cdb9b893d70d2c3f5025ce61c500928e129 Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.066267 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.066815 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.067086 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.067339 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.067577 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.067790 5021 status_manager.go:851] "Failed to get status for pod" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bw5fp\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.068049 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.105191 5021 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection 
refused" start-of-body= Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.105248 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194396 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194461 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194491 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdt9f\" (UniqueName: \"kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194542 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194575 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194604 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194647 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194673 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") " Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194699 5021 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194723 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194773 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194797 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194823 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.194848 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error\") pod \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\" (UID: \"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d\") "
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.195592 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.195772 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.196687 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.196696 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.196835 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.201256 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.201503 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.201629 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.201982 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f" (OuterVolumeSpecName: "kube-api-access-wdt9f") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "kube-api-access-wdt9f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.201987 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.202103 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.202249 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.202357 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.202955 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" (UID: "cdbaee95-6f0a-4b0e-8969-0b9fb32f808d"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.210578 5021 generic.go:334] "Generic (PLEG): container finished" podID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerID="f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544" exitCode=0
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.210631 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.210642 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" event={"ID":"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d","Type":"ContainerDied","Data":"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"}
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.210668 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" event={"ID":"cdbaee95-6f0a-4b0e-8969-0b9fb32f808d","Type":"ContainerDied","Data":"17e6e6f68d15cb5ef97c05bbd10f2296804005b3b2aa39b22e81be766e7f5239"}
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.210684 5021 scope.go:117] "RemoveContainer" containerID="f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.211754 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.212315 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.212732 5021 status_manager.go:851] "Failed to get status for pod" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bw5fp\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.213093 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.213334 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.213674 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.214866 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.215013 5021 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568" exitCode=1
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.215119 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568"}
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.215646 5021 scope.go:117] "RemoveContainer" containerID="f805306703ac31a2921b4d3bfd202ef7c3c71e5954c987ad3b4bbd4827b46568"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.216317 5021 status_manager.go:851] "Failed to get status for pod" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bw5fp\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.216767 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217168 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217545 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217637 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"a2cf9cd3237983d5d00ed3d8c34fd8f58b3820a76981df151f0401c2d921abb1"}
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217712 5021 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="a2cf9cd3237983d5d00ed3d8c34fd8f58b3820a76981df151f0401c2d921abb1" exitCode=0
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217832 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f73f7134852205f81028935a12c15cdb9b893d70d2c3f5025ce61c500928e129"}
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217936 5021 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.218070 5021 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.217767 5021 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: E0121 15:28:47.218545 5021 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.218650 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.219013 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.219421 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.219667 5021 status_manager.go:851] "Failed to get status for pod" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bw5fp\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.220035 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.220377 5021 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.220649 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.221010 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.221339 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.225000 5021 status_manager.go:851] "Failed to get status for pod" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.225251 5021 status_manager.go:851] "Failed to get status for pod" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" pod="openshift-authentication/oauth-openshift-558db77b4-bw5fp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bw5fp\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.225546 5021 status_manager.go:851] "Failed to get status for pod" podUID="f987e070-06b8-47d4-bc4a-441649d5d9e9" pod="openshift-marketplace/community-operators-zxtgj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-zxtgj\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.225826 5021 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.226126 5021 status_manager.go:851] "Failed to get status for pod" podUID="da685ff8-e5ae-4316-914d-690f55d41325" pod="openshift-marketplace/redhat-marketplace-qbdg4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qbdg4\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.226468 5021 status_manager.go:851] "Failed to get status for pod" podUID="b45643f8-e7b2-4c4f-8da1-b4e753886a05" pod="openshift-marketplace/redhat-operators-xgz8b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xgz8b\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.226744 5021 status_manager.go:851] "Failed to get status for pod" podUID="9520cbac-26ed-42dc-a4f7-b2cbd670e722" pod="openshift-marketplace/certified-operators-vl5k6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-vl5k6\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.235380 5021 scope.go:117] "RemoveContainer" containerID="f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"
Jan 21 15:28:47 crc kubenswrapper[5021]: E0121 15:28:47.235939 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544\": container with ID starting with f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544 not found: ID does not exist" containerID="f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.236051 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544"} err="failed to get container status \"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544\": rpc error: code = NotFound desc = could not find container \"f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544\": container with ID starting with f974ade6411edf3df31d2dd48279772af017d8b1a44dae42414625550c45e544 not found: ID does not exist"
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296482 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296514 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296528 5021 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296538 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296547 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296556 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296566 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296576 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296583 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296592 5021 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296600 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296610 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdt9f\" (UniqueName: \"kubernetes.io/projected/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-kube-api-access-wdt9f\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296619 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:47 crc kubenswrapper[5021]: I0121 15:28:47.296628 5021 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.163411 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.211726 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xgz8b"
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.227894 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"98a3c5f0a9240d4b031fa4020529307a15504c8201d7566f991517c0b41a9c94"}
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.227953 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"92f68b995b1b0fd86b4c8ae9cc9ee6347aced4bdb0ec2aafb30d953d6a4b460b"}
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.230944 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 21 15:28:48 crc kubenswrapper[5021]: I0121 15:28:48.231074 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8774e7d5d50fe842319bbaa6499e5abd7b483fe6cf179009c19499dedb7ae209"}
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.239951 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"257f8198e1c4000752c4a84ab5e91a5874600fb813e57c4a158d80266e840ef9"}
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.240369 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a14b1d25f88291c69ffbe7f46a36c441832ac6db4899ce1d77868f91ac5f7512"}
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.240394 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.240409 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"357d8d9a11d1d15d2dffb4d529e0596dd3186c2309c299c9c7cb266119e9816c"}
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.240317 5021 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:49 crc kubenswrapper[5021]: I0121 15:28:49.240433 5021 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:50 crc kubenswrapper[5021]: I0121 15:28:50.286028 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:28:50 crc kubenswrapper[5021]: I0121 15:28:50.290642 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:28:50 crc kubenswrapper[5021]: I0121 15:28:50.900576 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:28:51 crc kubenswrapper[5021]: I0121 15:28:51.758828 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:51 crc kubenswrapper[5021]: I0121 15:28:51.758935 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:51 crc kubenswrapper[5021]: I0121 15:28:51.773975 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:54 crc kubenswrapper[5021]: I0121 15:28:54.254196 5021 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:55 crc kubenswrapper[5021]: I0121 15:28:55.279566 5021 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:55 crc kubenswrapper[5021]: I0121 15:28:55.279612 5021 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:55 crc kubenswrapper[5021]: I0121 15:28:55.284445 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:28:55 crc kubenswrapper[5021]: I0121 15:28:55.288434 5021 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="e12c4976-de49-493a-b8e5-7450ca862ad9"
Jan 21 15:28:56 crc kubenswrapper[5021]: I0121 15:28:56.285169 5021 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:56 crc kubenswrapper[5021]: I0121 15:28:56.285205 5021 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f2b8eb71-fd1e-4587-b6fb-81a25ff4d8db"
Jan 21 15:28:58 crc kubenswrapper[5021]: I0121 15:28:58.764390 5021 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="e12c4976-de49-493a-b8e5-7450ca862ad9"
Jan 21 15:29:00 crc kubenswrapper[5021]: I0121 15:29:00.904266 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 21 15:29:03 crc kubenswrapper[5021]: I0121 15:29:03.431668 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 21 15:29:04 crc kubenswrapper[5021]: I0121 15:29:04.153756 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 21 15:29:04 crc kubenswrapper[5021]: I0121 15:29:04.227030 5021 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 21 15:29:04 crc kubenswrapper[5021]: I0121 15:29:04.350797 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 21 15:29:04 crc kubenswrapper[5021]: I0121 15:29:04.517377 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.179406 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.246808 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.578042 5021 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","podf4b27818a5e8e43d0dc095d08835c792"] err="unable to destroy cgroup paths for cgroup [kubepods burstable podf4b27818a5e8e43d0dc095d08835c792] : Timed out while waiting for systemd to remove kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.653968 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.855670 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 21 15:29:05 crc kubenswrapper[5021]: I0121 15:29:05.933249 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.104815 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.175012 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.177570 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.202170 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.382390 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.538534 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.596220 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.606879 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.613813 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.699125 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 21 15:29:06 crc kubenswrapper[5021]: I0121 15:29:06.713453 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.153965 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.186148 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.205615 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.285541 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.508713 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.534422 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.549869 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.621546 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.668607 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.694083 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 21 15:29:07 crc kubenswrapper[5021]: I0121 15:29:07.827672 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.006668 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.047505 5021 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.049004 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vl5k6" podStartSLOduration=36.348253508 podStartE2EDuration="39.048992486s" podCreationTimestamp="2026-01-21 15:28:29 +0000 UTC" firstStartedPulling="2026-01-21 15:28:31.054365335 +0000 UTC m=+252.589479224" lastFinishedPulling="2026-01-21 15:28:33.755104313 +0000 UTC m=+255.290218202" observedRunningTime="2026-01-21 15:28:53.977126234 +0000 UTC m=+275.512240123" watchObservedRunningTime="2026-01-21 15:29:08.048992486 +0000 UTC m=+289.584106375"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.049895 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xgz8b" podStartSLOduration=37.40837092 podStartE2EDuration="41.049889713s" podCreationTimestamp="2026-01-21 15:28:27 +0000 UTC" firstStartedPulling="2026-01-21 15:28:29.025345188 +0000 UTC m=+250.560459067" lastFinishedPulling="2026-01-21 15:28:32.666863971 +0000 UTC m=+254.201977860" observedRunningTime="2026-01-21 15:28:53.957340067 +0000 UTC m=+275.492453956" watchObservedRunningTime="2026-01-21 15:29:08.049889713 +0000 UTC m=+289.585003602"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.050505 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qbdg4" podStartSLOduration=35.599332263 podStartE2EDuration="38.050500212s" podCreationTimestamp="2026-01-21 15:28:30 +0000 UTC" firstStartedPulling="2026-01-21 15:28:31.043146566 +0000 UTC m=+252.578260455" lastFinishedPulling="2026-01-21 15:28:33.494314515 +0000 UTC m=+255.029428404" observedRunningTime="2026-01-21 15:28:53.934983781 +0000 UTC m=+275.470097670" watchObservedRunningTime="2026-01-21 15:29:08.050500212 +0000 UTC m=+289.585614101"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.051994 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-bw5fp"]
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.052046 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.056760 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.069954 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=14.069937309 podStartE2EDuration="14.069937309s" podCreationTimestamp="2026-01-21 15:28:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:29:08.069088993 +0000 UTC m=+289.604202882" watchObservedRunningTime="2026-01-21 15:29:08.069937309 +0000 UTC m=+289.605051198"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.170256 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.307707 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.373946 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.382339 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.647682 5021 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.739978 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.762603 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" path="/var/lib/kubelet/pods/cdbaee95-6f0a-4b0e-8969-0b9fb32f808d/volumes"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.806253 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.931567 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 21 15:29:08 crc kubenswrapper[5021]: I0121 15:29:08.936656 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.068168 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.188484 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.199310 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.227342 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.275078 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.313971 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.425053 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.465519 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.518301 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.536901 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.561768 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.599478 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.703241 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.713488 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.739978 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.806159 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 21 15:29:09 crc kubenswrapper[5021]: I0121 15:29:09.806797 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.046768 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.177959 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.185264 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.231877 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.284426 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.286222 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.333121 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.343195 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.360109 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.380550 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.385238 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.591282 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.668330 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.861930 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.882324 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.901260 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.973741 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 21 15:29:10 crc kubenswrapper[5021]: I0121 15:29:10.994853 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.060531 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.103597 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.123996 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.134513 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.231722 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.238032 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.343278 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.389743 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.390991 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.401121 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.409868 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.448128 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.508599 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.519560 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.654858 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.680082 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.774926 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.793173 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.798028 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.825418 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.903715 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 21 15:29:11 crc kubenswrapper[5021]: I0121 15:29:11.978616 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.159018 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.178066 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.373547 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.381424 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.388403 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.434441 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.486662 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.496080 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.508170 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.596339 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.605004 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.622984 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.711752 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.733626 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.837207 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.929432 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.975629 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 21 15:29:12 crc kubenswrapper[5021]: I0121 15:29:12.975629 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.034998 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.038367 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.048885 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.091884 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.141141 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.143138 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.154617 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.155603 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.392365 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.404518 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.405239 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.414230 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.435354 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.444853 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.461875 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.480557 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.522140 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.619736 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.652856 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.700836 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.711121 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.722262 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.766665 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.835075 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.879389 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 21 15:29:13 crc kubenswrapper[5021]: I0121 15:29:13.975270 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.089757 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.094148 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.158003 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.170444 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.189114 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.231247 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.279491 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.307889 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.359314 5021 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.426702 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.444220 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.487503 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.495186 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.522734 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.604793 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.683674 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.723129 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.736729 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.759976 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.792988 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.851707 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.892793 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 21 15:29:14 crc kubenswrapper[5021]: I0121 15:29:14.959975 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.199307 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.217506 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.271311 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.283365 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.303177 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.563310 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.565760 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.611408 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.614258 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.652095 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.690507 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.711427 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.717709 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.860629 5021 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.892985 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.921261 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.969547 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 21 15:29:15 crc kubenswrapper[5021]: I0121 15:29:15.987714 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.074687 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.100879 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.133783 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.177799 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.251078 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.270338 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.361185 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.405760 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.414479 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.461276 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.465612 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.481456 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.597850 5021 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.625944 5021 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.626173 5021 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964" gracePeriod=5 Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.764865 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.773433 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.798309 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.804582 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 21 15:29:16 crc kubenswrapper[5021]: I0121 15:29:16.841804 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.009036 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.030433 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.425476 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.547495 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.575826 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.725065 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.944333 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 21 15:29:17 crc kubenswrapper[5021]: I0121 15:29:17.945433 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.096231 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.147874 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.163572 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.361321 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: 
I0121 15:29:18.538627 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.542102 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.601199 5021 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.602039 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.683257 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 21 15:29:18 crc kubenswrapper[5021]: I0121 15:29:18.839001 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.033530 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.211192 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.249612 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.276805 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.375795 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.439231 5021 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.442352 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.563512 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.719523 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 21 15:29:19 crc kubenswrapper[5021]: I0121 15:29:19.770071 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 15:29:20 crc kubenswrapper[5021]: I0121 15:29:20.309899 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 21 15:29:20 crc kubenswrapper[5021]: I0121 15:29:20.311168 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.207863 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 21 15:29:22 
crc kubenswrapper[5021]: I0121 15:29:22.208438 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.397831 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.397959 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.397996 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398009 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398079 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398135 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398166 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398307 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398345 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398438 5021 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398457 5021 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398469 5021 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.398481 5021 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.409074 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.443338 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.443406 5021 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964" exitCode=137 Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.443478 5021 scope.go:117] "RemoveContainer" containerID="7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.443573 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.467618 5021 scope.go:117] "RemoveContainer" containerID="7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964" Jan 21 15:29:22 crc kubenswrapper[5021]: E0121 15:29:22.468491 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964\": container with ID starting with 7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964 not found: ID does not exist" containerID="7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.468540 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964"} err="failed to get container status \"7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964\": rpc error: code = NotFound desc = could not find container \"7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964\": container with ID starting with 7c93b9e7f5abcc8d6914afa41d2650d0b5c94699d5d9d416a3b9f350c27d0964 not found: ID does not exist" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.499531 5021 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:22 crc kubenswrapper[5021]: I0121 15:29:22.746260 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.627929 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-555fcf5468-vsjlm"] Jan 21 15:29:27 crc kubenswrapper[5021]: E0121 15:29:27.628645 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628662 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" Jan 21 15:29:27 crc kubenswrapper[5021]: E0121 15:29:27.628674 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628680 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 15:29:27 crc kubenswrapper[5021]: E0121 15:29:27.628698 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" containerName="installer" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628705 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" containerName="installer" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628816 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628827 5021 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cdbaee95-6f0a-4b0e-8969-0b9fb32f808d" containerName="oauth-openshift" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.628835 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a9fdb38-edc6-4315-a91c-9d8489695e24" containerName="installer" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.629330 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.632173 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.632362 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.633628 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.633987 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.634247 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.634363 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.635176 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.635464 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.636649 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.636677 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.637199 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.641771 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.643400 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.649124 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-555fcf5468-vsjlm"] Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.653822 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.661051 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 21 15:29:27 crc kubenswrapper[5021]: 
I0121 15:29:27.782149 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782215 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782250 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782277 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-error\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782298 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-router-certs\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782317 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-login\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782344 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782814 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-session\") pod 
\"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.782970 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-service-ca\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.783012 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.783139 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-dir\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.783340 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxc2r\" (UniqueName: \"kubernetes.io/projected/8fb18bba-979d-43dc-8f23-ed07a855f43c-kube-api-access-wxc2r\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.783584 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.783760 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-policies\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885144 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxc2r\" (UniqueName: \"kubernetes.io/projected/8fb18bba-979d-43dc-8f23-ed07a855f43c-kube-api-access-wxc2r\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885229 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885256 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-policies\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885312 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885348 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885400 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885468 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-error\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885501 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-router-certs\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885528 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-login\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885569 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885595 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-session\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885614 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-service-ca\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885634 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.885675 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-dir\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.886252 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-dir\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.886545 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.886868 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-audit-policies\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.887442 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-service-ca\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: 
\"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.888250 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900352 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-error\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900409 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-session\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900433 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900533 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900558 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900794 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900946 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-user-template-login\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: 
\"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.900997 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8fb18bba-979d-43dc-8f23-ed07a855f43c-v4-0-config-system-router-certs\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.904459 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxc2r\" (UniqueName: \"kubernetes.io/projected/8fb18bba-979d-43dc-8f23-ed07a855f43c-kube-api-access-wxc2r\") pod \"oauth-openshift-555fcf5468-vsjlm\" (UID: \"8fb18bba-979d-43dc-8f23-ed07a855f43c\") " pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:27 crc kubenswrapper[5021]: I0121 15:29:27.952940 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:28 crc kubenswrapper[5021]: I0121 15:29:28.389503 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-555fcf5468-vsjlm"] Jan 21 15:29:28 crc kubenswrapper[5021]: I0121 15:29:28.490827 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" event={"ID":"8fb18bba-979d-43dc-8f23-ed07a855f43c","Type":"ContainerStarted","Data":"b4204ce87cb5850601315d9b2044cbb8e542f3a38f38bdae11b8ea73ab1ce3ea"} Jan 21 15:29:30 crc kubenswrapper[5021]: I0121 15:29:30.508134 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" event={"ID":"8fb18bba-979d-43dc-8f23-ed07a855f43c","Type":"ContainerStarted","Data":"5af2a3558630bbe287a84a87e819d45278d0ce0f735699f51105d95f0c1a5b5a"} Jan 21 15:29:30 crc kubenswrapper[5021]: I0121 15:29:30.508218 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:30 crc kubenswrapper[5021]: I0121 15:29:30.514350 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" Jan 21 15:29:30 crc kubenswrapper[5021]: I0121 15:29:30.540554 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-555fcf5468-vsjlm" podStartSLOduration=70.54050852 podStartE2EDuration="1m10.54050852s" podCreationTimestamp="2026-01-21 15:28:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:29:30.53630871 +0000 UTC m=+312.071422639" watchObservedRunningTime="2026-01-21 15:29:30.54050852 +0000 UTC m=+312.075622439" Jan 21 15:29:31 crc kubenswrapper[5021]: I0121 15:29:31.917501 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 21 15:29:39 crc kubenswrapper[5021]: I0121 15:29:39.123214 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.300659 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 
21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.302001 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" podUID="521d3dca-6ae7-48f6-a3bc-859493564f8d" containerName="controller-manager" containerID="cri-o://f3653583bafdb7164675e125f282c25acbd352f9800d43ca5dda8d72eb83fe76" gracePeriod=30 Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.412476 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.412927 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" podUID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" containerName="route-controller-manager" containerID="cri-o://2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77" gracePeriod=30 Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.620552 5021 generic.go:334] "Generic (PLEG): container finished" podID="521d3dca-6ae7-48f6-a3bc-859493564f8d" containerID="f3653583bafdb7164675e125f282c25acbd352f9800d43ca5dda8d72eb83fe76" exitCode=0 Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.620695 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" event={"ID":"521d3dca-6ae7-48f6-a3bc-859493564f8d","Type":"ContainerDied","Data":"f3653583bafdb7164675e125f282c25acbd352f9800d43ca5dda8d72eb83fe76"} Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.629956 5021 generic.go:334] "Generic (PLEG): container finished" podID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" containerID="2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77" exitCode=0 Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.630037 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" event={"ID":"b17f4af7-2215-4afd-810b-ae1f9a5ca41a","Type":"ContainerDied","Data":"2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77"} Jan 21 15:29:42 crc kubenswrapper[5021]: E0121 15:29:42.667678 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb17f4af7_2215_4afd_810b_ae1f9a5ca41a.slice/crio-2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:29:42 crc kubenswrapper[5021]: I0121 15:29:42.886337 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.015771 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.031927 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert\") pod \"521d3dca-6ae7-48f6-a3bc-859493564f8d\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.032012 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwjdn\" (UniqueName: \"kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn\") pod \"521d3dca-6ae7-48f6-a3bc-859493564f8d\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.032058 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config\") pod \"521d3dca-6ae7-48f6-a3bc-859493564f8d\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.032143 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles\") pod \"521d3dca-6ae7-48f6-a3bc-859493564f8d\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.032205 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca\") pod \"521d3dca-6ae7-48f6-a3bc-859493564f8d\" (UID: \"521d3dca-6ae7-48f6-a3bc-859493564f8d\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.033256 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca" (OuterVolumeSpecName: "client-ca") pod "521d3dca-6ae7-48f6-a3bc-859493564f8d" (UID: "521d3dca-6ae7-48f6-a3bc-859493564f8d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.033519 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "521d3dca-6ae7-48f6-a3bc-859493564f8d" (UID: "521d3dca-6ae7-48f6-a3bc-859493564f8d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.036226 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config" (OuterVolumeSpecName: "config") pod "521d3dca-6ae7-48f6-a3bc-859493564f8d" (UID: "521d3dca-6ae7-48f6-a3bc-859493564f8d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.041217 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "521d3dca-6ae7-48f6-a3bc-859493564f8d" (UID: "521d3dca-6ae7-48f6-a3bc-859493564f8d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.041357 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn" (OuterVolumeSpecName: "kube-api-access-mwjdn") pod "521d3dca-6ae7-48f6-a3bc-859493564f8d" (UID: "521d3dca-6ae7-48f6-a3bc-859493564f8d"). InnerVolumeSpecName "kube-api-access-mwjdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.133699 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config\") pod \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.134198 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca\") pod \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.134326 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert\") pod \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.134531 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlxt8\" (UniqueName: \"kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8\") pod \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\" (UID: \"b17f4af7-2215-4afd-810b-ae1f9a5ca41a\") " Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.134956 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/521d3dca-6ae7-48f6-a3bc-859493564f8d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135038 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwjdn\" (UniqueName: \"kubernetes.io/projected/521d3dca-6ae7-48f6-a3bc-859493564f8d-kube-api-access-mwjdn\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135112 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135172 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135236 5021 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/521d3dca-6ae7-48f6-a3bc-859493564f8d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135250 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config" (OuterVolumeSpecName: "config") pod "b17f4af7-2215-4afd-810b-ae1f9a5ca41a" (UID: 
"b17f4af7-2215-4afd-810b-ae1f9a5ca41a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.135467 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca" (OuterVolumeSpecName: "client-ca") pod "b17f4af7-2215-4afd-810b-ae1f9a5ca41a" (UID: "b17f4af7-2215-4afd-810b-ae1f9a5ca41a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.140154 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8" (OuterVolumeSpecName: "kube-api-access-xlxt8") pod "b17f4af7-2215-4afd-810b-ae1f9a5ca41a" (UID: "b17f4af7-2215-4afd-810b-ae1f9a5ca41a"). InnerVolumeSpecName "kube-api-access-xlxt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.140146 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b17f4af7-2215-4afd-810b-ae1f9a5ca41a" (UID: "b17f4af7-2215-4afd-810b-ae1f9a5ca41a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.237047 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.237124 5021 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.237150 5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.237169 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlxt8\" (UniqueName: \"kubernetes.io/projected/b17f4af7-2215-4afd-810b-ae1f9a5ca41a-kube-api-access-xlxt8\") on node \"crc\" DevicePath \"\"" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.639965 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.639962 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k" event={"ID":"b17f4af7-2215-4afd-810b-ae1f9a5ca41a","Type":"ContainerDied","Data":"517689de5d33d982690d553bd3a06b73242552eb0aa412487516a946cddf9262"} Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.640121 5021 scope.go:117] "RemoveContainer" containerID="2948390a1a0f2621360b10ad0a73165e182ffa557d9af13c423c6c76dec7ac77" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.647686 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" event={"ID":"521d3dca-6ae7-48f6-a3bc-859493564f8d","Type":"ContainerDied","Data":"cf5caa5398b1e7839be4ffcbed1d10007428320f2d4443073644cb46053a1690"} Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.647768 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlz6c" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.662355 5021 scope.go:117] "RemoveContainer" containerID="f3653583bafdb7164675e125f282c25acbd352f9800d43ca5dda8d72eb83fe76" Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.679126 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.699364 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlz6c"] Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.703532 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:29:43 crc kubenswrapper[5021]: I0121 15:29:43.706600 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qcz9k"] Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.646375 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz"] Jan 21 15:29:44 crc kubenswrapper[5021]: E0121 15:29:44.646791 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="521d3dca-6ae7-48f6-a3bc-859493564f8d" containerName="controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.646814 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="521d3dca-6ae7-48f6-a3bc-859493564f8d" containerName="controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: E0121 15:29:44.646833 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" containerName="route-controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.646844 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" containerName="route-controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.647027 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" containerName="route-controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.647054 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="521d3dca-6ae7-48f6-a3bc-859493564f8d" 
containerName="controller-manager" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.647714 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.654326 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.654577 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.654730 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.654949 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.655222 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.655497 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.657105 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.657930 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-config\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.658047 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-client-ca\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.658122 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/933098cc-66e6-4535-bbb6-c7daa5630370-serving-cert\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.658227 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzq4x\" (UniqueName: \"kubernetes.io/projected/933098cc-66e6-4535-bbb6-c7daa5630370-kube-api-access-qzq4x\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.658142 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.663452 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.664642 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.668808 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz"] Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.670767 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.670864 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.671302 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.671582 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.672803 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.674923 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.746516 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="521d3dca-6ae7-48f6-a3bc-859493564f8d" path="/var/lib/kubelet/pods/521d3dca-6ae7-48f6-a3bc-859493564f8d/volumes" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.747574 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b17f4af7-2215-4afd-810b-ae1f9a5ca41a" path="/var/lib/kubelet/pods/b17f4af7-2215-4afd-810b-ae1f9a5ca41a/volumes" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.759162 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzq4x\" (UniqueName: \"kubernetes.io/projected/933098cc-66e6-4535-bbb6-c7daa5630370-kube-api-access-qzq4x\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.759226 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-config\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.759267 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-client-ca\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " 
pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.759297 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/933098cc-66e6-4535-bbb6-c7daa5630370-serving-cert\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.760797 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-client-ca\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.761049 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/933098cc-66e6-4535-bbb6-c7daa5630370-config\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.764613 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/933098cc-66e6-4535-bbb6-c7daa5630370-serving-cert\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.780570 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzq4x\" (UniqueName: \"kubernetes.io/projected/933098cc-66e6-4535-bbb6-c7daa5630370-kube-api-access-qzq4x\") pod \"route-controller-manager-7cd7b8dc84-ksqdz\" (UID: \"933098cc-66e6-4535-bbb6-c7daa5630370\") " pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.861696 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.861810 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.861837 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:44 crc 
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.861903 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-config\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.861970 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq6ch\" (UniqueName: \"kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.963538 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.963667 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.963716 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.963773 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-config\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.963810 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq6ch\" (UniqueName: \"kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.965523 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-config\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.966160 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.966953 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.969565 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.980295 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz"
Jan 21 15:29:44 crc kubenswrapper[5021]: I0121 15:29:44.995151 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq6ch\" (UniqueName: \"kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch\") pod \"controller-manager-58888f8468-mr52b\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") " pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.236900 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz"]
Jan 21 15:29:45 crc kubenswrapper[5021]: W0121 15:29:45.241046 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod933098cc_66e6_4535_bbb6_c7daa5630370.slice/crio-85fb79990bc26e0d67446f5dc1ceb34c46110a73fa13ae4cac35e1683fda0b9a WatchSource:0}: Error finding container 85fb79990bc26e0d67446f5dc1ceb34c46110a73fa13ae4cac35e1683fda0b9a: Status 404 returned error can't find the container with id 85fb79990bc26e0d67446f5dc1ceb34c46110a73fa13ae4cac35e1683fda0b9a
Need to start a new one" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.519805 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:29:45 crc kubenswrapper[5021]: W0121 15:29:45.529012 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podafe458b5_c2ef_408a_ab46_4b87b42afb08.slice/crio-88269991cc5f0a7a672133fc9721645f57a50b47a18c5180e471216b01ce6082 WatchSource:0}: Error finding container 88269991cc5f0a7a672133fc9721645f57a50b47a18c5180e471216b01ce6082: Status 404 returned error can't find the container with id 88269991cc5f0a7a672133fc9721645f57a50b47a18c5180e471216b01ce6082 Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.670702 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" event={"ID":"933098cc-66e6-4535-bbb6-c7daa5630370","Type":"ContainerStarted","Data":"319723af8fa8f66852068082f8cbc5741c65551eddc3d01e229506f00e4ee50c"} Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.670751 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" event={"ID":"933098cc-66e6-4535-bbb6-c7daa5630370","Type":"ContainerStarted","Data":"85fb79990bc26e0d67446f5dc1ceb34c46110a73fa13ae4cac35e1683fda0b9a"} Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.670974 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.678162 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" event={"ID":"afe458b5-c2ef-408a-ab46-4b87b42afb08","Type":"ContainerStarted","Data":"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082"} Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.678345 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.678364 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" event={"ID":"afe458b5-c2ef-408a-ab46-4b87b42afb08","Type":"ContainerStarted","Data":"88269991cc5f0a7a672133fc9721645f57a50b47a18c5180e471216b01ce6082"} Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.678588 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.679700 5021 patch_prober.go:28] interesting pod/controller-manager-58888f8468-mr52b container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" start-of-body= Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.679761 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerName="controller-manager" probeResult="failure" output="Get 
\"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.703026 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7cd7b8dc84-ksqdz" podStartSLOduration=3.702987631 podStartE2EDuration="3.702987631s" podCreationTimestamp="2026-01-21 15:29:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:29:45.694415913 +0000 UTC m=+327.229529822" watchObservedRunningTime="2026-01-21 15:29:45.702987631 +0000 UTC m=+327.238101530" Jan 21 15:29:45 crc kubenswrapper[5021]: I0121 15:29:45.723302 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" podStartSLOduration=3.723274685 podStartE2EDuration="3.723274685s" podCreationTimestamp="2026-01-21 15:29:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:29:45.71927402 +0000 UTC m=+327.254387909" watchObservedRunningTime="2026-01-21 15:29:45.723274685 +0000 UTC m=+327.258388584" Jan 21 15:29:46 crc kubenswrapper[5021]: I0121 15:29:46.690585 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:29:52 crc kubenswrapper[5021]: I0121 15:29:52.008728 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.188603 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g"] Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.189837 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.197210 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.197210 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.214262 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g"] Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.282160 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4687l\" (UniqueName: \"kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.282239 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.282565 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.384346 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.384449 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4687l\" (UniqueName: \"kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.384481 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.385616 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume\") pod 
\"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.393519 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.415082 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4687l\" (UniqueName: \"kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l\") pod \"collect-profiles-29483490-dsk6g\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.508155 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.559738 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 21 15:30:00 crc kubenswrapper[5021]: I0121 15:30:00.988771 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g"] Jan 21 15:30:01 crc kubenswrapper[5021]: I0121 15:30:01.788348 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" event={"ID":"522f1ec2-d915-4b24-8f6b-a6d31c807de9","Type":"ContainerStarted","Data":"88bb6f11fe3865afc5641a005ef069608f4263363f99f9ead420bcfa5477bfdf"} Jan 21 15:30:02 crc kubenswrapper[5021]: I0121 15:30:02.796066 5021 generic.go:334] "Generic (PLEG): container finished" podID="522f1ec2-d915-4b24-8f6b-a6d31c807de9" containerID="6e08e23f3f16ee0f74bb339f1d9760136b37e8b260976ab6e3a305d88a5ba8a5" exitCode=0 Jan 21 15:30:02 crc kubenswrapper[5021]: I0121 15:30:02.796125 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" event={"ID":"522f1ec2-d915-4b24-8f6b-a6d31c807de9","Type":"ContainerDied","Data":"6e08e23f3f16ee0f74bb339f1d9760136b37e8b260976ab6e3a305d88a5ba8a5"} Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.143512 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.247332 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4687l\" (UniqueName: \"kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l\") pod \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.247440 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume\") pod \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.247592 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume\") pod \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\" (UID: \"522f1ec2-d915-4b24-8f6b-a6d31c807de9\") " Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.248238 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume" (OuterVolumeSpecName: "config-volume") pod "522f1ec2-d915-4b24-8f6b-a6d31c807de9" (UID: "522f1ec2-d915-4b24-8f6b-a6d31c807de9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.249183 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/522f1ec2-d915-4b24-8f6b-a6d31c807de9-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.253975 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "522f1ec2-d915-4b24-8f6b-a6d31c807de9" (UID: "522f1ec2-d915-4b24-8f6b-a6d31c807de9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.257188 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l" (OuterVolumeSpecName: "kube-api-access-4687l") pod "522f1ec2-d915-4b24-8f6b-a6d31c807de9" (UID: "522f1ec2-d915-4b24-8f6b-a6d31c807de9"). InnerVolumeSpecName "kube-api-access-4687l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.350480 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/522f1ec2-d915-4b24-8f6b-a6d31c807de9-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.350518 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4687l\" (UniqueName: \"kubernetes.io/projected/522f1ec2-d915-4b24-8f6b-a6d31c807de9-kube-api-access-4687l\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.809275 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" event={"ID":"522f1ec2-d915-4b24-8f6b-a6d31c807de9","Type":"ContainerDied","Data":"88bb6f11fe3865afc5641a005ef069608f4263363f99f9ead420bcfa5477bfdf"} Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.809324 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88bb6f11fe3865afc5641a005ef069608f4263363f99f9ead420bcfa5477bfdf" Jan 21 15:30:04 crc kubenswrapper[5021]: I0121 15:30:04.809359 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g" Jan 21 15:30:12 crc kubenswrapper[5021]: I0121 15:30:12.357082 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:30:12 crc kubenswrapper[5021]: I0121 15:30:12.357737 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:30:42 crc kubenswrapper[5021]: I0121 15:30:42.272875 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:30:42 crc kubenswrapper[5021]: I0121 15:30:42.274893 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerName="controller-manager" containerID="cri-o://e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082" gracePeriod=30 Jan 21 15:30:42 crc kubenswrapper[5021]: I0121 15:30:42.356891 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:30:42 crc kubenswrapper[5021]: I0121 15:30:42.357022 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.811108 5021 util.go:48] "No ready sandbox 
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.811108 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.846422 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"]
Jan 21 15:30:43 crc kubenswrapper[5021]: E0121 15:30:43.847127 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerName="controller-manager"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.848524 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerName="controller-manager"
Jan 21 15:30:43 crc kubenswrapper[5021]: E0121 15:30:43.849501 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="522f1ec2-d915-4b24-8f6b-a6d31c807de9" containerName="collect-profiles"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.849614 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="522f1ec2-d915-4b24-8f6b-a6d31c807de9" containerName="collect-profiles"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.849886 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="522f1ec2-d915-4b24-8f6b-a6d31c807de9" containerName="collect-profiles"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.849989 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerName="controller-manager"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.852774 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.881486 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"]
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.918041 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert\") pod \"afe458b5-c2ef-408a-ab46-4b87b42afb08\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") "
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.918160 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq6ch\" (UniqueName: \"kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch\") pod \"afe458b5-c2ef-408a-ab46-4b87b42afb08\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") "
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.918207 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca\") pod \"afe458b5-c2ef-408a-ab46-4b87b42afb08\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") "
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.918280 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-config\") pod \"afe458b5-c2ef-408a-ab46-4b87b42afb08\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") "
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.918322 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles\") pod \"afe458b5-c2ef-408a-ab46-4b87b42afb08\" (UID: \"afe458b5-c2ef-408a-ab46-4b87b42afb08\") "
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919406 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-config\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919492 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-serving-cert\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919552 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-proxy-ca-bundles\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919541 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca" (OuterVolumeSpecName: "client-ca") pod "afe458b5-c2ef-408a-ab46-4b87b42afb08" (UID: "afe458b5-c2ef-408a-ab46-4b87b42afb08"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919600 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-client-ca\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919665 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh2wr\" (UniqueName: \"kubernetes.io/projected/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-kube-api-access-sh2wr\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"
Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.919747 5021 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-client-ca\") on node \"crc\" DevicePath \"\""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.920526 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "afe458b5-c2ef-408a-ab46-4b87b42afb08" (UID: "afe458b5-c2ef-408a-ab46-4b87b42afb08"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.926590 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch" (OuterVolumeSpecName: "kube-api-access-tq6ch") pod "afe458b5-c2ef-408a-ab46-4b87b42afb08" (UID: "afe458b5-c2ef-408a-ab46-4b87b42afb08"). InnerVolumeSpecName "kube-api-access-tq6ch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:30:43 crc kubenswrapper[5021]: I0121 15:30:43.926768 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "afe458b5-c2ef-408a-ab46-4b87b42afb08" (UID: "afe458b5-c2ef-408a-ab46-4b87b42afb08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021259 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-client-ca\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021380 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh2wr\" (UniqueName: \"kubernetes.io/projected/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-kube-api-access-sh2wr\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021423 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-config\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021474 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-serving-cert\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021507 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-proxy-ca-bundles\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021565 
5021 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/afe458b5-c2ef-408a-ab46-4b87b42afb08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021583 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq6ch\" (UniqueName: \"kubernetes.io/projected/afe458b5-c2ef-408a-ab46-4b87b42afb08-kube-api-access-tq6ch\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021602 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.021614 5021 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/afe458b5-c2ef-408a-ab46-4b87b42afb08-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.023521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-proxy-ca-bundles\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.023703 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-client-ca\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.024738 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-config\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.027494 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-serving-cert\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.040025 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh2wr\" (UniqueName: \"kubernetes.io/projected/7106019d-00ce-46a2-a0a5-36e8c3f1e5a0-kube-api-access-sh2wr\") pod \"controller-manager-5f97cd7dcc-2t5l5\" (UID: \"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0\") " pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.057539 5021 generic.go:334] "Generic (PLEG): container finished" podID="afe458b5-c2ef-408a-ab46-4b87b42afb08" containerID="e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082" exitCode=0 Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.057617 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.057643 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" event={"ID":"afe458b5-c2ef-408a-ab46-4b87b42afb08","Type":"ContainerDied","Data":"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082"} Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.058244 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58888f8468-mr52b" event={"ID":"afe458b5-c2ef-408a-ab46-4b87b42afb08","Type":"ContainerDied","Data":"88269991cc5f0a7a672133fc9721645f57a50b47a18c5180e471216b01ce6082"} Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.058331 5021 scope.go:117] "RemoveContainer" containerID="e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.081361 5021 scope.go:117] "RemoveContainer" containerID="e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082" Jan 21 15:30:44 crc kubenswrapper[5021]: E0121 15:30:44.082089 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082\": container with ID starting with e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082 not found: ID does not exist" containerID="e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.082159 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082"} err="failed to get container status \"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082\": rpc error: code = NotFound desc = could not find container \"e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082\": container with ID starting with e0e6c98155626b5a4c151376d3c496f3344d061da608b5e468fbb233493d0082 not found: ID does not exist" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.090249 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.093654 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58888f8468-mr52b"] Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.199549 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.395373 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5"] Jan 21 15:30:44 crc kubenswrapper[5021]: I0121 15:30:44.744975 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afe458b5-c2ef-408a-ab46-4b87b42afb08" path="/var/lib/kubelet/pods/afe458b5-c2ef-408a-ab46-4b87b42afb08/volumes" Jan 21 15:30:45 crc kubenswrapper[5021]: I0121 15:30:45.066006 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" event={"ID":"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0","Type":"ContainerStarted","Data":"3187a8167c93e8c14b89531a86eb1fc464d044381e630ea06ba32c2ee68ec32e"} Jan 21 15:30:45 crc kubenswrapper[5021]: I0121 15:30:45.066070 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" event={"ID":"7106019d-00ce-46a2-a0a5-36e8c3f1e5a0","Type":"ContainerStarted","Data":"d1dce19d8f2d8a51be541f9612e4d1456be0ea16c0cdedb4f985a7a0540c1aca"} Jan 21 15:30:45 crc kubenswrapper[5021]: I0121 15:30:45.067860 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:45 crc kubenswrapper[5021]: I0121 15:30:45.073740 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" Jan 21 15:30:45 crc kubenswrapper[5021]: I0121 15:30:45.088419 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5f97cd7dcc-2t5l5" podStartSLOduration=3.088387568 podStartE2EDuration="3.088387568s" podCreationTimestamp="2026-01-21 15:30:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:30:45.083384664 +0000 UTC m=+386.618498553" watchObservedRunningTime="2026-01-21 15:30:45.088387568 +0000 UTC m=+386.623501457" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.426830 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7mnxh"] Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.428074 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.451119 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7mnxh"] Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535084 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535448 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-bound-sa-token\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535517 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535536 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-tls\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535551 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-trusted-ca\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535573 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-certificates\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535599 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.535760 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndgk8\" (UniqueName: 
\"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-kube-api-access-ndgk8\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.560336 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637056 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-tls\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637109 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-trusted-ca\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637177 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-certificates\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637212 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637240 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndgk8\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-kube-api-access-ndgk8\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637279 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.637321 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-bound-sa-token\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.638869 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-certificates\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.640031 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.644183 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-trusted-ca\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.655129 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.655145 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-registry-tls\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.657233 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-bound-sa-token\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.657988 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndgk8\" (UniqueName: \"kubernetes.io/projected/63f57f38-5cf6-4f89-b522-f3ae89bc7faa-kube-api-access-ndgk8\") pod \"image-registry-66df7c8f76-7mnxh\" (UID: \"63f57f38-5cf6-4f89-b522-f3ae89bc7faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:52 crc kubenswrapper[5021]: I0121 15:30:52.748044 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:53 crc kubenswrapper[5021]: I0121 15:30:53.154130 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7mnxh"] Jan 21 15:30:54 crc kubenswrapper[5021]: I0121 15:30:54.115973 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" event={"ID":"63f57f38-5cf6-4f89-b522-f3ae89bc7faa","Type":"ContainerStarted","Data":"026d03421e4de15088cf77fee1c046e249c1d299e8e87e68d0bf0c427aaa036a"} Jan 21 15:30:54 crc kubenswrapper[5021]: I0121 15:30:54.116600 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" event={"ID":"63f57f38-5cf6-4f89-b522-f3ae89bc7faa","Type":"ContainerStarted","Data":"dfe6a3f27d4b31df2b937c68d3f7afae8b616e06cd095d47d3deeb006d89fb4e"} Jan 21 15:30:54 crc kubenswrapper[5021]: I0121 15:30:54.116661 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:30:54 crc kubenswrapper[5021]: I0121 15:30:54.134689 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" podStartSLOduration=2.134661476 podStartE2EDuration="2.134661476s" podCreationTimestamp="2026-01-21 15:30:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:30:54.133310207 +0000 UTC m=+395.668424096" watchObservedRunningTime="2026-01-21 15:30:54.134661476 +0000 UTC m=+395.669775385" Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.357397 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.358262 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.358339 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.359253 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.359343 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04" gracePeriod=600 Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 
15:31:12.754565 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-7mnxh" Jan 21 15:31:12 crc kubenswrapper[5021]: I0121 15:31:12.843382 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:31:13 crc kubenswrapper[5021]: I0121 15:31:13.244198 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04" exitCode=0 Jan 21 15:31:13 crc kubenswrapper[5021]: I0121 15:31:13.244289 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04"} Jan 21 15:31:13 crc kubenswrapper[5021]: I0121 15:31:13.244547 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733"} Jan 21 15:31:13 crc kubenswrapper[5021]: I0121 15:31:13.244571 5021 scope.go:117] "RemoveContainer" containerID="e4c5895dd4f412ff99b134a78d67b65a3792a50da3f9a88407af678092a29d5b" Jan 21 15:31:37 crc kubenswrapper[5021]: I0121 15:31:37.876022 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" podUID="e1a2d469-35da-4253-b3a5-057b68c4d68b" containerName="registry" containerID="cri-o://b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e" gracePeriod=30 Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.250649 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.395395 5021 generic.go:334] "Generic (PLEG): container finished" podID="e1a2d469-35da-4253-b3a5-057b68c4d68b" containerID="b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e" exitCode=0 Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.395488 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" event={"ID":"e1a2d469-35da-4253-b3a5-057b68c4d68b","Type":"ContainerDied","Data":"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e"} Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.395526 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.395593 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zmqhz" event={"ID":"e1a2d469-35da-4253-b3a5-057b68c4d68b","Type":"ContainerDied","Data":"9008e9eaf3bd6a0b6babf4bc3deb59e9f130f9526ac4d4540ffe77c532d8eb45"} Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.395640 5021 scope.go:117] "RemoveContainer" containerID="b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.419323 5021 scope.go:117] "RemoveContainer" containerID="b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e" Jan 21 15:31:38 crc kubenswrapper[5021]: E0121 15:31:38.419793 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e\": container with ID starting with b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e not found: ID does not exist" containerID="b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.419859 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e"} err="failed to get container status \"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e\": rpc error: code = NotFound desc = could not find container \"b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e\": container with ID starting with b085241572c09dc7e059e623f51d2d3997115952267fb4ff501386d274be131e not found: ID does not exist" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.450607 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.450785 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451380 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj5gj\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451426 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token\") pod 
\"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451536 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451626 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.451702 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets\") pod \"e1a2d469-35da-4253-b3a5-057b68c4d68b\" (UID: \"e1a2d469-35da-4253-b3a5-057b68c4d68b\") " Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.452539 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.453313 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.459522 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.460421 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.460997 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj" (OuterVolumeSpecName: "kube-api-access-rj5gj") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "kube-api-access-rj5gj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.462560 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.463802 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.500051 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "e1a2d469-35da-4253-b3a5-057b68c4d68b" (UID: "e1a2d469-35da-4253-b3a5-057b68c4d68b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553458 5021 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553505 5021 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e1a2d469-35da-4253-b3a5-057b68c4d68b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553517 5021 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e1a2d469-35da-4253-b3a5-057b68c4d68b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553529 5021 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553540 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1a2d469-35da-4253-b3a5-057b68c4d68b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553548 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj5gj\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-kube-api-access-rj5gj\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.553556 5021 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e1a2d469-35da-4253-b3a5-057b68c4d68b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.726176 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:31:38 crc 
kubenswrapper[5021]: I0121 15:31:38.731824 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zmqhz"] Jan 21 15:31:38 crc kubenswrapper[5021]: I0121 15:31:38.747023 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1a2d469-35da-4253-b3a5-057b68c4d68b" path="/var/lib/kubelet/pods/e1a2d469-35da-4253-b3a5-057b68c4d68b/volumes" Jan 21 15:33:12 crc kubenswrapper[5021]: I0121 15:33:12.357110 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:33:12 crc kubenswrapper[5021]: I0121 15:33:12.357879 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:33:19 crc kubenswrapper[5021]: I0121 15:33:19.002060 5021 scope.go:117] "RemoveContainer" containerID="585f1d340aa37dd148d32e3cd98d6c3809a943cdca402faa615f801d805849bc" Jan 21 15:33:42 crc kubenswrapper[5021]: I0121 15:33:42.357673 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:33:42 crc kubenswrapper[5021]: I0121 15:33:42.358714 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:34:12 crc kubenswrapper[5021]: I0121 15:34:12.356524 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:34:12 crc kubenswrapper[5021]: I0121 15:34:12.357239 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:34:12 crc kubenswrapper[5021]: I0121 15:34:12.357284 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:34:12 crc kubenswrapper[5021]: I0121 15:34:12.357831 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:34:12 crc kubenswrapper[5021]: I0121 15:34:12.357889 5021 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733" gracePeriod=600 Jan 21 15:34:13 crc kubenswrapper[5021]: I0121 15:34:13.371557 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733" exitCode=0 Jan 21 15:34:13 crc kubenswrapper[5021]: I0121 15:34:13.371630 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733"} Jan 21 15:34:13 crc kubenswrapper[5021]: I0121 15:34:13.372293 5021 scope.go:117] "RemoveContainer" containerID="c8a334c0d1d06dd287b03b7bbb8d0161ca638f7571e2c8577f63c3b4d6b95f04" Jan 21 15:34:14 crc kubenswrapper[5021]: I0121 15:34:14.385359 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03"} Jan 21 15:34:19 crc kubenswrapper[5021]: I0121 15:34:19.051875 5021 scope.go:117] "RemoveContainer" containerID="03389cea96dde3a168d28bc41734f72658b38df0a300d504cf16bcfa82e065c2" Jan 21 15:34:19 crc kubenswrapper[5021]: I0121 15:34:19.084491 5021 scope.go:117] "RemoveContainer" containerID="25209e10f6a6ebcd7ff92784ba20d40a2ab48f9b4460622c55d38d8d1527064b" Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.608981 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9flhm"] Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.610869 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-controller" containerID="cri-o://5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.611153 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-node" containerID="cri-o://dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.611147 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.611243 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-acl-logging" containerID="cri-o://3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.611408 5021 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="northd" containerID="cri-o://33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.611945 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="sbdb" containerID="cri-o://fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8" gracePeriod=30 Jan 21 15:35:06 crc kubenswrapper[5021]: I0121 15:35:06.612074 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="nbdb" containerID="cri-o://ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee" gracePeriod=30 Jan 21 15:35:07 crc kubenswrapper[5021]: I0121 15:35:07.231198 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" containerID="cri-o://a5bb94789cb1dce31aaed81c325ae13132b10033281ae65f6ce37e91b74f6b8c" gracePeriod=30 Jan 21 15:35:07 crc kubenswrapper[5021]: E0121 15:35:07.351833 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e3f3965_4473_46d7_a613_2ed3e4b10ad7.slice/crio-5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e3f3965_4473_46d7_a613_2ed3e4b10ad7.slice/crio-bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e3f3965_4473_46d7_a613_2ed3e4b10ad7.slice/crio-dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.215104 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/2.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.216355 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/1.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.216397 5021 generic.go:334] "Generic (PLEG): container finished" podID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" containerID="40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f" exitCode=2 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.216466 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerDied","Data":"40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.216529 5021 scope.go:117] "RemoveContainer" containerID="9276d2f11794c73e2d9b67ba12b81ec547e14eb6a5808fb86f64d46a12cffcd3" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.217208 5021 scope.go:117] "RemoveContainer" 
containerID="40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.217592 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-sd7j9_openshift-multus(49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a)\"" pod="openshift-multus/multus-sd7j9" podUID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.223459 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovnkube-controller/3.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.227060 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-acl-logging/0.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.227810 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-controller/0.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228772 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="a5bb94789cb1dce31aaed81c325ae13132b10033281ae65f6ce37e91b74f6b8c" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228822 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228835 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228847 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228859 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228870 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52" exitCode=0 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228859 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"a5bb94789cb1dce31aaed81c325ae13132b10033281ae65f6ce37e91b74f6b8c"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228990 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229029 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" 
event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229057 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229083 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229102 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229127 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.228881 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8" exitCode=143 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229175 5021 generic.go:334] "Generic (PLEG): container finished" podID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerID="5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773" exitCode=143 Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.229255 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773"} Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.260499 5021 scope.go:117] "RemoveContainer" containerID="c7a8ac23da1f14fa76303b89fa5f285acf38ba3869b2cb7ccc9551612ba86912" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.587121 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-acl-logging/0.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.587829 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-controller/0.log" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.588475 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.653627 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rzt28"] Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654027 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654078 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654112 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="nbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654120 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="nbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654131 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654139 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654147 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654153 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654165 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654173 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654187 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1a2d469-35da-4253-b3a5-057b68c4d68b" containerName="registry" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654195 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1a2d469-35da-4253-b3a5-057b68c4d68b" containerName="registry" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654208 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-node" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654216 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-node" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654226 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kubecfg-setup" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654233 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kubecfg-setup" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654244 5021 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654251 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654260 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="sbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654267 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="sbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654278 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="northd" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654285 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="northd" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654293 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-acl-logging" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654301 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-acl-logging" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654427 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654440 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-ovn-metrics" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654450 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="nbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654458 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654472 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654481 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654490 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="northd" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654501 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovn-acl-logging" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654513 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="kube-rbac-proxy-node" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654522 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654531 5021 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="sbdb" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654543 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1a2d469-35da-4253-b3a5-057b68c4d68b" containerName="registry" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.654659 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654667 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.654842 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: E0121 15:35:08.655053 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.655062 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" containerName="ovnkube-controller" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.658621 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784665 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784730 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784759 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784782 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784799 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784809 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784857 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784931 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784956 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784995 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.784984 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log" (OuterVolumeSpecName: "node-log") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785041 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785055 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785005 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785093 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785065 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785138 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785128 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket" (OuterVolumeSpecName: "log-socket") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785164 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785174 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785201 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785317 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k67c\" (UniqueName: \"kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785359 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785398 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785417 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785517 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785588 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785622 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd\") pod \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\" (UID: \"2e3f3965-4473-46d7-a613-2ed3e4b10ad7\") " Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785638 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785681 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785729 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785755 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785774 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash" (OuterVolumeSpecName: "host-slash") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785811 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785812 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785850 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785851 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785901 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-env-overrides\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.785952 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786034 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786068 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxdj8\" (UniqueName: \"kubernetes.io/projected/3f0969bb-8f07-4571-8cfd-f7682a252ac8-kube-api-access-zxdj8\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786090 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-script-lib\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786113 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-kubelet\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786187 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-netd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786228 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-node-log\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786312 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-etc-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786400 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-ovn\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786456 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-config\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786502 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-netns\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786521 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-slash\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786556 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786606 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-systemd-units\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786634 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-var-lib-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786659 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-log-socket\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786751 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-bin\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786789 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-systemd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786808 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovn-node-metrics-cert\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786882 5021 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786894 5021 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786930 5021 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.786989 5021 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787022 5021 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787038 5021 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-node-log\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787053 5021 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787077 5021 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787087 5021 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-log-socket\") on node \"crc\" DevicePath \"\"" Jan 21 
15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787100 5021 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787113 5021 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787124 5021 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787135 5021 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787146 5021 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787157 5021 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787170 5021 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.787182 5021 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-host-slash\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.793482 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.794964 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c" (OuterVolumeSpecName: "kube-api-access-5k67c") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "kube-api-access-5k67c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.801269 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "2e3f3965-4473-46d7-a613-2ed3e4b10ad7" (UID: "2e3f3965-4473-46d7-a613-2ed3e4b10ad7"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888134 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888204 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-systemd-units\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888247 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-var-lib-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888273 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-log-socket\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888312 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-bin\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888336 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-systemd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888357 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovn-node-metrics-cert\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888363 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888410 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-systemd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888377 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-log-socket\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888417 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-env-overrides\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888376 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-var-lib-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888488 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-bin\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888615 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-systemd-units\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888624 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888671 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888699 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888725 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxdj8\" (UniqueName: \"kubernetes.io/projected/3f0969bb-8f07-4571-8cfd-f7682a252ac8-kube-api-access-zxdj8\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 
15:35:08.888751 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-script-lib\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888773 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-kubelet\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888774 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-ovn-kubernetes\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888792 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-netd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888926 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-kubelet\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.888970 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-node-log\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889183 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-node-log\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889241 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-etc-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889272 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-etc-openvswitch\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889285 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-cni-netd\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889336 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-ovn\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889380 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-config\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889437 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-slash\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889469 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-netns\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889479 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-run-ovn\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889575 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-run-netns\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889634 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-env-overrides\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889755 5021 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889793 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3f0969bb-8f07-4571-8cfd-f7682a252ac8-host-slash\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889815 5021 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k67c\" (UniqueName: \"kubernetes.io/projected/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-kube-api-access-5k67c\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889838 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2e3f3965-4473-46d7-a613-2ed3e4b10ad7-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.889999 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-script-lib\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.890362 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovnkube-config\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.893840 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f0969bb-8f07-4571-8cfd-f7682a252ac8-ovn-node-metrics-cert\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.906178 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxdj8\" (UniqueName: \"kubernetes.io/projected/3f0969bb-8f07-4571-8cfd-f7682a252ac8-kube-api-access-zxdj8\") pod \"ovnkube-node-rzt28\" (UID: \"3f0969bb-8f07-4571-8cfd-f7682a252ac8\") " pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:08 crc kubenswrapper[5021]: I0121 15:35:08.974097 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:09 crc kubenswrapper[5021]: W0121 15:35:09.000468 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f0969bb_8f07_4571_8cfd_f7682a252ac8.slice/crio-49e2a4b4f67309dedc511bb652ca20eaf93471f996ae655db548ccd02ccd4ffd WatchSource:0}: Error finding container 49e2a4b4f67309dedc511bb652ca20eaf93471f996ae655db548ccd02ccd4ffd: Status 404 returned error can't find the container with id 49e2a4b4f67309dedc511bb652ca20eaf93471f996ae655db548ccd02ccd4ffd Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.238955 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/2.log" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.239946 5021 generic.go:334] "Generic (PLEG): container finished" podID="3f0969bb-8f07-4571-8cfd-f7682a252ac8" containerID="7bfd4865a9b98ece262c14ec930dfebfc3271aa2df25bb61436e7ae240d0fdf5" exitCode=0 Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.240004 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerDied","Data":"7bfd4865a9b98ece262c14ec930dfebfc3271aa2df25bb61436e7ae240d0fdf5"} Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.240034 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"49e2a4b4f67309dedc511bb652ca20eaf93471f996ae655db548ccd02ccd4ffd"} Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.246850 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-acl-logging/0.log" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.247404 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-9flhm_2e3f3965-4473-46d7-a613-2ed3e4b10ad7/ovn-controller/0.log" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.247827 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" event={"ID":"2e3f3965-4473-46d7-a613-2ed3e4b10ad7","Type":"ContainerDied","Data":"6712a107b7189696d54af4eb09f71e34ffcfe2dc4b07e1763c6b232a677bc1d1"} Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.247864 5021 scope.go:117] "RemoveContainer" containerID="a5bb94789cb1dce31aaed81c325ae13132b10033281ae65f6ce37e91b74f6b8c" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.247883 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9flhm" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.272268 5021 scope.go:117] "RemoveContainer" containerID="fb81f970e9b92212535b8961f324f7e9904f3ead656d5aea28d1a7054245ede8" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.293078 5021 scope.go:117] "RemoveContainer" containerID="ee527cf2f3d51077144d81d16dbb6b3563f6e40e30ba92d502b09801ecbca2ee" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.304086 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9flhm"] Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.307282 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9flhm"] Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.342379 5021 scope.go:117] "RemoveContainer" containerID="33a648b0889c361c99d72d947f51b4bf9ed5373bb3192c06f787a46be05694d8" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.358609 5021 scope.go:117] "RemoveContainer" containerID="bf2232b9e5a3d00ab5e6640b47c6fddffc45b84322505c86450c23e884607366" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.375888 5021 scope.go:117] "RemoveContainer" containerID="dbaeb819c15bc6d2c7a27387f825f6f2b84a9671ca0d4440fc56b98c9bab7b52" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.392598 5021 scope.go:117] "RemoveContainer" containerID="3346a404ecc46aaf27f9eebec820eceb15899c2da83fd513d8c1fde406d47ff8" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.408603 5021 scope.go:117] "RemoveContainer" containerID="5ed7c1837f033493cb08cde88c75467a9fa1dda6ca0f1c72bb2f0c9dccfd0773" Jan 21 15:35:09 crc kubenswrapper[5021]: I0121 15:35:09.427393 5021 scope.go:117] "RemoveContainer" containerID="1bcecd612e874cc3aaa687ec08447719406b2b6254959d6c2bec427ed97761be" Jan 21 15:35:10 crc kubenswrapper[5021]: I0121 15:35:10.259469 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"981594076b4a0caa1c4ecdb0b1346be8cc54600fa5276c8f8694ffd33135e887"} Jan 21 15:35:10 crc kubenswrapper[5021]: I0121 15:35:10.259996 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"582876d76e8d48adfd1ce2413cf96eea6360a6988338cb046525441f425402fd"} Jan 21 15:35:10 crc kubenswrapper[5021]: I0121 15:35:10.260015 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"b2f8604d1be0b62b66f286523aa8a9b2c62ac327d7b9ccffbb546e8c10641392"} Jan 21 15:35:10 crc kubenswrapper[5021]: I0121 15:35:10.260031 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"afe2df1271a3b1d4b121f2c819771e45a0dc0f31b9868d6d3bf8dddd64f36854"} Jan 21 15:35:10 crc kubenswrapper[5021]: I0121 15:35:10.747091 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e3f3965-4473-46d7-a613-2ed3e4b10ad7" path="/var/lib/kubelet/pods/2e3f3965-4473-46d7-a613-2ed3e4b10ad7/volumes" Jan 21 15:35:11 crc kubenswrapper[5021]: I0121 15:35:11.271463 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" 
event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"eb4675cb27f9280f31773642469d668aeff7eb08f0a50ee9dfeed0740e44010d"} Jan 21 15:35:11 crc kubenswrapper[5021]: I0121 15:35:11.271518 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"81c309eaff40a91ba364810e0538e020780482243ba51665f4136f6a73d705be"} Jan 21 15:35:13 crc kubenswrapper[5021]: I0121 15:35:13.287396 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"be354bc1d514331a4c03cb24376e4e40aaac725709f39a1b7854a152254d98e7"} Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.303334 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" event={"ID":"3f0969bb-8f07-4571-8cfd-f7682a252ac8","Type":"ContainerStarted","Data":"2ca010c0a8491fda7d6642dfaa321da3b53c62e7f6936e101d93fe97fbf931af"} Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.303743 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.303760 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.303771 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.332992 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" podStartSLOduration=7.332973774 podStartE2EDuration="7.332973774s" podCreationTimestamp="2026-01-21 15:35:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:35:15.329749244 +0000 UTC m=+656.864863133" watchObservedRunningTime="2026-01-21 15:35:15.332973774 +0000 UTC m=+656.868087663" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.336668 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.341189 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.872955 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-dkftr"] Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.873569 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.875235 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.875261 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.875750 5021 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-gw48g" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.876846 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.884141 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dkftr"] Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.894356 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.894417 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.894550 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln4s6\" (UniqueName: \"kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.996294 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.996378 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.996412 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln4s6\" (UniqueName: \"kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.996623 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " 
pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:15 crc kubenswrapper[5021]: I0121 15:35:15.997067 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: I0121 15:35:16.021888 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln4s6\" (UniqueName: \"kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6\") pod \"crc-storage-crc-dkftr\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: I0121 15:35:16.187670 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.209034 5021 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(043e4853389ca175198386df72f6846f7e7b3b4df6383e061bf17957fd0b6a75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.209129 5021 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(043e4853389ca175198386df72f6846f7e7b3b4df6383e061bf17957fd0b6a75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.209160 5021 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(043e4853389ca175198386df72f6846f7e7b3b4df6383e061bf17957fd0b6a75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.209222 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(043e4853389ca175198386df72f6846f7e7b3b4df6383e061bf17957fd0b6a75): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-dkftr" podUID="89653110-7e4d-4527-b508-abefd7ca6497" Jan 21 15:35:16 crc kubenswrapper[5021]: I0121 15:35:16.310411 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: I0121 15:35:16.311412 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.340516 5021 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(bea68d85d027951fc0f3708e87fb23e2aeda78c7867aff42b7d8ca0ca7aa176e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.340599 5021 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(bea68d85d027951fc0f3708e87fb23e2aeda78c7867aff42b7d8ca0ca7aa176e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.340628 5021 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(bea68d85d027951fc0f3708e87fb23e2aeda78c7867aff42b7d8ca0ca7aa176e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:16 crc kubenswrapper[5021]: E0121 15:35:16.340688 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(bea68d85d027951fc0f3708e87fb23e2aeda78c7867aff42b7d8ca0ca7aa176e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-dkftr" podUID="89653110-7e4d-4527-b508-abefd7ca6497" Jan 21 15:35:23 crc kubenswrapper[5021]: I0121 15:35:23.737570 5021 scope.go:117] "RemoveContainer" containerID="40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f" Jan 21 15:35:23 crc kubenswrapper[5021]: E0121 15:35:23.738366 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-sd7j9_openshift-multus(49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a)\"" pod="openshift-multus/multus-sd7j9" podUID="49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a" Jan 21 15:35:30 crc kubenswrapper[5021]: I0121 15:35:30.737158 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:30 crc kubenswrapper[5021]: I0121 15:35:30.739163 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:30 crc kubenswrapper[5021]: E0121 15:35:30.764711 5021 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(251339f72fdcff95a7954e463d9058101a5c76549a7ddbdc97432283f5f1c6be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 21 15:35:30 crc kubenswrapper[5021]: E0121 15:35:30.764784 5021 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(251339f72fdcff95a7954e463d9058101a5c76549a7ddbdc97432283f5f1c6be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:30 crc kubenswrapper[5021]: E0121 15:35:30.764833 5021 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(251339f72fdcff95a7954e463d9058101a5c76549a7ddbdc97432283f5f1c6be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:30 crc kubenswrapper[5021]: E0121 15:35:30.764888 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-dkftr_crc-storage(89653110-7e4d-4527-b508-abefd7ca6497)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-dkftr_crc-storage_89653110-7e4d-4527-b508-abefd7ca6497_0(251339f72fdcff95a7954e463d9058101a5c76549a7ddbdc97432283f5f1c6be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-dkftr" podUID="89653110-7e4d-4527-b508-abefd7ca6497" Jan 21 15:35:35 crc kubenswrapper[5021]: I0121 15:35:35.738270 5021 scope.go:117] "RemoveContainer" containerID="40ece78bf00f59d0318b189d59d2347204f425bb98265e7412a6ecfb2793009f" Jan 21 15:35:36 crc kubenswrapper[5021]: I0121 15:35:36.437104 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/2.log" Jan 21 15:35:36 crc kubenswrapper[5021]: I0121 15:35:36.437664 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-sd7j9" event={"ID":"49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a","Type":"ContainerStarted","Data":"9038c8a554cca8cfd0b7a0a9d2239d9d9a116d41ba112d4a7444cdd17ddb948f"} Jan 21 15:35:39 crc kubenswrapper[5021]: I0121 15:35:39.009994 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rzt28" Jan 21 15:35:42 crc kubenswrapper[5021]: I0121 15:35:42.737055 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:42 crc kubenswrapper[5021]: I0121 15:35:42.737846 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:42 crc kubenswrapper[5021]: I0121 15:35:42.938510 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dkftr"] Jan 21 15:35:42 crc kubenswrapper[5021]: W0121 15:35:42.947644 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89653110_7e4d_4527_b508_abefd7ca6497.slice/crio-92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d WatchSource:0}: Error finding container 92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d: Status 404 returned error can't find the container with id 92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d Jan 21 15:35:42 crc kubenswrapper[5021]: I0121 15:35:42.951255 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 15:35:43 crc kubenswrapper[5021]: I0121 15:35:43.482160 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dkftr" event={"ID":"89653110-7e4d-4527-b508-abefd7ca6497","Type":"ContainerStarted","Data":"92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d"} Jan 21 15:35:46 crc kubenswrapper[5021]: I0121 15:35:46.501258 5021 generic.go:334] "Generic (PLEG): container finished" podID="89653110-7e4d-4527-b508-abefd7ca6497" containerID="363590d610d351ed629d0a91c1255e346f39efc36e2e4634caf21ad888af679d" exitCode=0 Jan 21 15:35:46 crc kubenswrapper[5021]: I0121 15:35:46.501352 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dkftr" event={"ID":"89653110-7e4d-4527-b508-abefd7ca6497","Type":"ContainerDied","Data":"363590d610d351ed629d0a91c1255e346f39efc36e2e4634caf21ad888af679d"} Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.785281 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.844880 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln4s6\" (UniqueName: \"kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6\") pod \"89653110-7e4d-4527-b508-abefd7ca6497\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.845084 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage\") pod \"89653110-7e4d-4527-b508-abefd7ca6497\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.845137 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt\") pod \"89653110-7e4d-4527-b508-abefd7ca6497\" (UID: \"89653110-7e4d-4527-b508-abefd7ca6497\") " Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.845303 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "89653110-7e4d-4527-b508-abefd7ca6497" (UID: "89653110-7e4d-4527-b508-abefd7ca6497"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.853100 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6" (OuterVolumeSpecName: "kube-api-access-ln4s6") pod "89653110-7e4d-4527-b508-abefd7ca6497" (UID: "89653110-7e4d-4527-b508-abefd7ca6497"). InnerVolumeSpecName "kube-api-access-ln4s6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.866416 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "89653110-7e4d-4527-b508-abefd7ca6497" (UID: "89653110-7e4d-4527-b508-abefd7ca6497"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.947108 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln4s6\" (UniqueName: \"kubernetes.io/projected/89653110-7e4d-4527-b508-abefd7ca6497-kube-api-access-ln4s6\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.947172 5021 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/89653110-7e4d-4527-b508-abefd7ca6497-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:47 crc kubenswrapper[5021]: I0121 15:35:47.947187 5021 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/89653110-7e4d-4527-b508-abefd7ca6497-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 21 15:35:48 crc kubenswrapper[5021]: I0121 15:35:48.518121 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dkftr" event={"ID":"89653110-7e4d-4527-b508-abefd7ca6497","Type":"ContainerDied","Data":"92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d"} Jan 21 15:35:48 crc kubenswrapper[5021]: I0121 15:35:48.518480 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92637d8b8b6d829d85992fbcff574955cd1dc83977a4fe3bd70d4a0fd64c7f6d" Jan 21 15:35:48 crc kubenswrapper[5021]: I0121 15:35:48.518180 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dkftr" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.292683 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r"] Jan 21 15:35:55 crc kubenswrapper[5021]: E0121 15:35:55.293268 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89653110-7e4d-4527-b508-abefd7ca6497" containerName="storage" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.293283 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="89653110-7e4d-4527-b508-abefd7ca6497" containerName="storage" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.293413 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="89653110-7e4d-4527-b508-abefd7ca6497" containerName="storage" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.294292 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.297018 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.306470 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r"] Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.357796 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcb79\" (UniqueName: \"kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.357865 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.357942 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.458742 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.458830 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.458902 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcb79\" (UniqueName: \"kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.459495 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.459592 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.483443 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcb79\" (UniqueName: \"kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.658134 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:35:55 crc kubenswrapper[5021]: I0121 15:35:55.935821 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r"] Jan 21 15:35:56 crc kubenswrapper[5021]: I0121 15:35:56.574192 5021 generic.go:334] "Generic (PLEG): container finished" podID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerID="052ffc79c8a134ff3372c18824e1738dce7232e8ede3763b12aa113f73d10934" exitCode=0 Jan 21 15:35:56 crc kubenswrapper[5021]: I0121 15:35:56.574408 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" event={"ID":"483d13df-3d67-4110-a5a0-7c6d4fde373f","Type":"ContainerDied","Data":"052ffc79c8a134ff3372c18824e1738dce7232e8ede3763b12aa113f73d10934"} Jan 21 15:35:56 crc kubenswrapper[5021]: I0121 15:35:56.574710 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" event={"ID":"483d13df-3d67-4110-a5a0-7c6d4fde373f","Type":"ContainerStarted","Data":"f3f15076a19466731b92442d73382f6ce76b8eead2bba0eeb7f0ca520f9efa69"} Jan 21 15:35:58 crc kubenswrapper[5021]: I0121 15:35:58.589812 5021 generic.go:334] "Generic (PLEG): container finished" podID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerID="a1d18a988bd09299b266016a8a71d081d5fcefa9669bfdfdce830c9d3feb419f" exitCode=0 Jan 21 15:35:58 crc kubenswrapper[5021]: I0121 15:35:58.589984 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" event={"ID":"483d13df-3d67-4110-a5a0-7c6d4fde373f","Type":"ContainerDied","Data":"a1d18a988bd09299b266016a8a71d081d5fcefa9669bfdfdce830c9d3feb419f"} Jan 21 15:35:59 crc kubenswrapper[5021]: I0121 15:35:59.599435 5021 generic.go:334] "Generic (PLEG): container finished" podID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerID="b9321e412f841248a4c2e1a96237e44cee527471155248e26d6dd1e9b5e25d6a" exitCode=0 Jan 21 15:35:59 crc kubenswrapper[5021]: I0121 
15:35:59.599542 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" event={"ID":"483d13df-3d67-4110-a5a0-7c6d4fde373f","Type":"ContainerDied","Data":"b9321e412f841248a4c2e1a96237e44cee527471155248e26d6dd1e9b5e25d6a"} Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.847808 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.946478 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcb79\" (UniqueName: \"kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79\") pod \"483d13df-3d67-4110-a5a0-7c6d4fde373f\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.946660 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util\") pod \"483d13df-3d67-4110-a5a0-7c6d4fde373f\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.946761 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle\") pod \"483d13df-3d67-4110-a5a0-7c6d4fde373f\" (UID: \"483d13df-3d67-4110-a5a0-7c6d4fde373f\") " Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.947880 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle" (OuterVolumeSpecName: "bundle") pod "483d13df-3d67-4110-a5a0-7c6d4fde373f" (UID: "483d13df-3d67-4110-a5a0-7c6d4fde373f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.956413 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79" (OuterVolumeSpecName: "kube-api-access-dcb79") pod "483d13df-3d67-4110-a5a0-7c6d4fde373f" (UID: "483d13df-3d67-4110-a5a0-7c6d4fde373f"). InnerVolumeSpecName "kube-api-access-dcb79". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:36:00 crc kubenswrapper[5021]: I0121 15:36:00.960353 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util" (OuterVolumeSpecName: "util") pod "483d13df-3d67-4110-a5a0-7c6d4fde373f" (UID: "483d13df-3d67-4110-a5a0-7c6d4fde373f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.049356 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcb79\" (UniqueName: \"kubernetes.io/projected/483d13df-3d67-4110-a5a0-7c6d4fde373f-kube-api-access-dcb79\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.049432 5021 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-util\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.049451 5021 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/483d13df-3d67-4110-a5a0-7c6d4fde373f-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.615657 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" event={"ID":"483d13df-3d67-4110-a5a0-7c6d4fde373f","Type":"ContainerDied","Data":"f3f15076a19466731b92442d73382f6ce76b8eead2bba0eeb7f0ca520f9efa69"} Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.615733 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3f15076a19466731b92442d73382f6ce76b8eead2bba0eeb7f0ca520f9efa69" Jan 21 15:36:01 crc kubenswrapper[5021]: I0121 15:36:01.615762 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.968145 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-ppc7l"] Jan 21 15:36:02 crc kubenswrapper[5021]: E0121 15:36:02.968535 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="pull" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.968557 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="pull" Jan 21 15:36:02 crc kubenswrapper[5021]: E0121 15:36:02.968577 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="util" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.968588 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="util" Jan 21 15:36:02 crc kubenswrapper[5021]: E0121 15:36:02.968609 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="extract" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.968618 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="extract" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.968755 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="483d13df-3d67-4110-a5a0-7c6d4fde373f" containerName="extract" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.969358 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.972342 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-k6lp4" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.973373 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.973467 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 21 15:36:02 crc kubenswrapper[5021]: I0121 15:36:02.978499 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-ppc7l"] Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.076972 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6d8m\" (UniqueName: \"kubernetes.io/projected/0db957d4-1bed-4f2b-ac99-f64e313dc52b-kube-api-access-f6d8m\") pod \"nmstate-operator-646758c888-ppc7l\" (UID: \"0db957d4-1bed-4f2b-ac99-f64e313dc52b\") " pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.178332 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6d8m\" (UniqueName: \"kubernetes.io/projected/0db957d4-1bed-4f2b-ac99-f64e313dc52b-kube-api-access-f6d8m\") pod \"nmstate-operator-646758c888-ppc7l\" (UID: \"0db957d4-1bed-4f2b-ac99-f64e313dc52b\") " pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.198120 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6d8m\" (UniqueName: \"kubernetes.io/projected/0db957d4-1bed-4f2b-ac99-f64e313dc52b-kube-api-access-f6d8m\") pod \"nmstate-operator-646758c888-ppc7l\" (UID: \"0db957d4-1bed-4f2b-ac99-f64e313dc52b\") " pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.286386 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.495572 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-ppc7l"] Jan 21 15:36:03 crc kubenswrapper[5021]: W0121 15:36:03.507286 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db957d4_1bed_4f2b_ac99_f64e313dc52b.slice/crio-9e4adfc4efa2bf4d01e331bff9d38a3f33b2c8962321a5ce391e7f607cbae269 WatchSource:0}: Error finding container 9e4adfc4efa2bf4d01e331bff9d38a3f33b2c8962321a5ce391e7f607cbae269: Status 404 returned error can't find the container with id 9e4adfc4efa2bf4d01e331bff9d38a3f33b2c8962321a5ce391e7f607cbae269 Jan 21 15:36:03 crc kubenswrapper[5021]: I0121 15:36:03.628278 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" event={"ID":"0db957d4-1bed-4f2b-ac99-f64e313dc52b","Type":"ContainerStarted","Data":"9e4adfc4efa2bf4d01e331bff9d38a3f33b2c8962321a5ce391e7f607cbae269"} Jan 21 15:36:06 crc kubenswrapper[5021]: I0121 15:36:06.743722 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" event={"ID":"0db957d4-1bed-4f2b-ac99-f64e313dc52b","Type":"ContainerStarted","Data":"78bb84e6be838df392b3be90d5ad593e3687d8d59718644158d7942b29b97ad3"} Jan 21 15:36:06 crc kubenswrapper[5021]: I0121 15:36:06.755825 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-ppc7l" podStartSLOduration=1.997070289 podStartE2EDuration="4.755810467s" podCreationTimestamp="2026-01-21 15:36:02 +0000 UTC" firstStartedPulling="2026-01-21 15:36:03.512682977 +0000 UTC m=+705.047796866" lastFinishedPulling="2026-01-21 15:36:06.271423155 +0000 UTC m=+707.806537044" observedRunningTime="2026-01-21 15:36:06.753719829 +0000 UTC m=+708.288833738" watchObservedRunningTime="2026-01-21 15:36:06.755810467 +0000 UTC m=+708.290924356" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.721853 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-xdczs"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.722753 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.724884 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-fjdw4" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.742677 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-xdczs"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.751211 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.752177 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.755311 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.758692 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.775255 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-5mp2z"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.776799 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.844531 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4eeb4ade-bc6a-499d-b300-7e4feae201cb-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.844827 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct2m6\" (UniqueName: \"kubernetes.io/projected/4eeb4ade-bc6a-499d-b300-7e4feae201cb-kube-api-access-ct2m6\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.845052 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxqz9\" (UniqueName: \"kubernetes.io/projected/2b5cda33-cd29-42cc-a1ba-ef98996815d7-kube-api-access-mxqz9\") pod \"nmstate-metrics-54757c584b-xdczs\" (UID: \"2b5cda33-cd29-42cc-a1ba-ef98996815d7\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.865161 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.866052 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.867844 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.867929 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-sxqs7" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.868467 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.879345 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll"] Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946498 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4eeb4ade-bc6a-499d-b300-7e4feae201cb-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946558 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2qtk\" (UniqueName: \"kubernetes.io/projected/28848e86-4658-438b-8334-5cc756cab803-kube-api-access-b2qtk\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946581 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct2m6\" (UniqueName: \"kubernetes.io/projected/4eeb4ade-bc6a-499d-b300-7e4feae201cb-kube-api-access-ct2m6\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946604 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-nmstate-lock\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946637 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxqz9\" (UniqueName: \"kubernetes.io/projected/2b5cda33-cd29-42cc-a1ba-ef98996815d7-kube-api-access-mxqz9\") pod \"nmstate-metrics-54757c584b-xdczs\" (UID: \"2b5cda33-cd29-42cc-a1ba-ef98996815d7\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946657 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-ovs-socket\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.946686 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-dbus-socket\") pod \"nmstate-handler-5mp2z\" (UID: 
\"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.953613 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4eeb4ade-bc6a-499d-b300-7e4feae201cb-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.963385 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxqz9\" (UniqueName: \"kubernetes.io/projected/2b5cda33-cd29-42cc-a1ba-ef98996815d7-kube-api-access-mxqz9\") pod \"nmstate-metrics-54757c584b-xdczs\" (UID: \"2b5cda33-cd29-42cc-a1ba-ef98996815d7\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" Jan 21 15:36:07 crc kubenswrapper[5021]: I0121 15:36:07.963889 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct2m6\" (UniqueName: \"kubernetes.io/projected/4eeb4ade-bc6a-499d-b300-7e4feae201cb-kube-api-access-ct2m6\") pod \"nmstate-webhook-8474b5b9d8-b8kws\" (UID: \"4eeb4ade-bc6a-499d-b300-7e4feae201cb\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.039157 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.041079 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b648576cb-b5qg6"] Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.042013 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047583 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-nmstate-lock\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047654 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-ovs-socket\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047692 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a174b542-9e00-432f-bb4a-62bb0118d792-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047729 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-dbus-socket\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047755 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44wkh\" (UniqueName: \"kubernetes.io/projected/a174b542-9e00-432f-bb4a-62bb0118d792-kube-api-access-44wkh\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047778 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a174b542-9e00-432f-bb4a-62bb0118d792-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.047836 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2qtk\" (UniqueName: \"kubernetes.io/projected/28848e86-4658-438b-8334-5cc756cab803-kube-api-access-b2qtk\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.048392 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-dbus-socket\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.048451 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-nmstate-lock\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.048519 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/28848e86-4658-438b-8334-5cc756cab803-ovs-socket\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.066731 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.080621 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2qtk\" (UniqueName: \"kubernetes.io/projected/28848e86-4658-438b-8334-5cc756cab803-kube-api-access-b2qtk\") pod \"nmstate-handler-5mp2z\" (UID: \"28848e86-4658-438b-8334-5cc756cab803\") " pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.086671 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b648576cb-b5qg6"] Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.097955 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149241 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lrxd\" (UniqueName: \"kubernetes.io/projected/60866892-37c7-418f-bb79-f42748d50cf7-kube-api-access-6lrxd\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149294 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-oauth-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149316 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-oauth-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149345 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-service-ca\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149362 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-console-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149375 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-trusted-ca-bundle\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149395 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149419 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a174b542-9e00-432f-bb4a-62bb0118d792-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149442 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44wkh\" 
(UniqueName: \"kubernetes.io/projected/a174b542-9e00-432f-bb4a-62bb0118d792-kube-api-access-44wkh\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.149487 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a174b542-9e00-432f-bb4a-62bb0118d792-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.151924 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a174b542-9e00-432f-bb4a-62bb0118d792-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.161068 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a174b542-9e00-432f-bb4a-62bb0118d792-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: W0121 15:36:08.162270 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28848e86_4658_438b_8334_5cc756cab803.slice/crio-d0d0cedecd7ef6ecd0924ceb11a9730bcccd0d040255586d5fc402f563bf5a41 WatchSource:0}: Error finding container d0d0cedecd7ef6ecd0924ceb11a9730bcccd0d040255586d5fc402f563bf5a41: Status 404 returned error can't find the container with id d0d0cedecd7ef6ecd0924ceb11a9730bcccd0d040255586d5fc402f563bf5a41 Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.172038 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44wkh\" (UniqueName: \"kubernetes.io/projected/a174b542-9e00-432f-bb4a-62bb0118d792-kube-api-access-44wkh\") pod \"nmstate-console-plugin-7754f76f8b-g5kll\" (UID: \"a174b542-9e00-432f-bb4a-62bb0118d792\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.196987 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251083 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lrxd\" (UniqueName: \"kubernetes.io/projected/60866892-37c7-418f-bb79-f42748d50cf7-kube-api-access-6lrxd\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251133 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-oauth-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251160 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-oauth-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251183 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-service-ca\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251201 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-console-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251215 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-trusted-ca-bundle\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.251235 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.254179 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-console-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.255028 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-trusted-ca-bundle\") pod \"console-6b648576cb-b5qg6\" (UID: 
\"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.257590 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.258266 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-oauth-serving-cert\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.258807 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/60866892-37c7-418f-bb79-f42748d50cf7-service-ca\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.260119 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/60866892-37c7-418f-bb79-f42748d50cf7-console-oauth-config\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.280975 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lrxd\" (UniqueName: \"kubernetes.io/projected/60866892-37c7-418f-bb79-f42748d50cf7-kube-api-access-6lrxd\") pod \"console-6b648576cb-b5qg6\" (UID: \"60866892-37c7-418f-bb79-f42748d50cf7\") " pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.311968 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-xdczs"] Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.342726 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws"] Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.360569 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:08 crc kubenswrapper[5021]: W0121 15:36:08.376014 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4eeb4ade_bc6a_499d_b300_7e4feae201cb.slice/crio-ef73b4f8e451f9c8c2d4812b161c8210e66d638ffa55e705e127b431cd5d3bce WatchSource:0}: Error finding container ef73b4f8e451f9c8c2d4812b161c8210e66d638ffa55e705e127b431cd5d3bce: Status 404 returned error can't find the container with id ef73b4f8e451f9c8c2d4812b161c8210e66d638ffa55e705e127b431cd5d3bce Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.453932 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll"] Jan 21 15:36:08 crc kubenswrapper[5021]: W0121 15:36:08.473435 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda174b542_9e00_432f_bb4a_62bb0118d792.slice/crio-054ac26a03642171efc4c0219623d5fd10a4fff584589490231c7173e4b841e4 WatchSource:0}: Error finding container 054ac26a03642171efc4c0219623d5fd10a4fff584589490231c7173e4b841e4: Status 404 returned error can't find the container with id 054ac26a03642171efc4c0219623d5fd10a4fff584589490231c7173e4b841e4 Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.554487 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b648576cb-b5qg6"] Jan 21 15:36:08 crc kubenswrapper[5021]: W0121 15:36:08.558201 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60866892_37c7_418f_bb79_f42748d50cf7.slice/crio-914fb3d988954672050e4a1d682eea696b3a6c531cb97e19d2a8261363d6d38e WatchSource:0}: Error finding container 914fb3d988954672050e4a1d682eea696b3a6c531cb97e19d2a8261363d6d38e: Status 404 returned error can't find the container with id 914fb3d988954672050e4a1d682eea696b3a6c531cb97e19d2a8261363d6d38e Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.759504 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-5mp2z" event={"ID":"28848e86-4658-438b-8334-5cc756cab803","Type":"ContainerStarted","Data":"d0d0cedecd7ef6ecd0924ceb11a9730bcccd0d040255586d5fc402f563bf5a41"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.761378 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" event={"ID":"2b5cda33-cd29-42cc-a1ba-ef98996815d7","Type":"ContainerStarted","Data":"c5c647d7e2a18f3cef79cf82f43a6125bb70f293c945d46686ec54f26bdcfcc1"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.762569 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" event={"ID":"a174b542-9e00-432f-bb4a-62bb0118d792","Type":"ContainerStarted","Data":"054ac26a03642171efc4c0219623d5fd10a4fff584589490231c7173e4b841e4"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.763799 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b648576cb-b5qg6" event={"ID":"60866892-37c7-418f-bb79-f42748d50cf7","Type":"ContainerStarted","Data":"cf2df139d46bcba4e1d1ae5b4d97cb88099a5f4b225cbd88da329f2e8d81fb4d"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.763830 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b648576cb-b5qg6" 
event={"ID":"60866892-37c7-418f-bb79-f42748d50cf7","Type":"ContainerStarted","Data":"914fb3d988954672050e4a1d682eea696b3a6c531cb97e19d2a8261363d6d38e"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.764977 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" event={"ID":"4eeb4ade-bc6a-499d-b300-7e4feae201cb","Type":"ContainerStarted","Data":"ef73b4f8e451f9c8c2d4812b161c8210e66d638ffa55e705e127b431cd5d3bce"} Jan 21 15:36:08 crc kubenswrapper[5021]: I0121 15:36:08.835704 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b648576cb-b5qg6" podStartSLOduration=0.83568655 podStartE2EDuration="835.68655ms" podCreationTimestamp="2026-01-21 15:36:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:36:08.833088718 +0000 UTC m=+710.368202607" watchObservedRunningTime="2026-01-21 15:36:08.83568655 +0000 UTC m=+710.370800439" Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.787715 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-5mp2z" event={"ID":"28848e86-4658-438b-8334-5cc756cab803","Type":"ContainerStarted","Data":"8966fcbb19b15721362697e75ab86bc6a68e7dd17a6cd8302ad7f0d57e7cd0e7"} Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.788845 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.790817 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" event={"ID":"2b5cda33-cd29-42cc-a1ba-ef98996815d7","Type":"ContainerStarted","Data":"3cdb7babeea3e7b0214c7e609bac7f6d0d3ce99c4bf45e75a2db0c8d28c41500"} Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.793568 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" event={"ID":"a174b542-9e00-432f-bb4a-62bb0118d792","Type":"ContainerStarted","Data":"71a23aec55be842012634ab912b48d0afd370128cbb939f918c88817c0860cfc"} Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.795096 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" event={"ID":"4eeb4ade-bc6a-499d-b300-7e4feae201cb","Type":"ContainerStarted","Data":"25e6c611b9b82e18667a1e6ff48e3cd00f6a30cf601e5d9536058c57375b6947"} Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.795346 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.857150 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-5mp2z" podStartSLOduration=1.886340095 podStartE2EDuration="4.857132663s" podCreationTimestamp="2026-01-21 15:36:07 +0000 UTC" firstStartedPulling="2026-01-21 15:36:08.16381723 +0000 UTC m=+709.698931119" lastFinishedPulling="2026-01-21 15:36:11.134609798 +0000 UTC m=+712.669723687" observedRunningTime="2026-01-21 15:36:11.856876426 +0000 UTC m=+713.391990315" watchObservedRunningTime="2026-01-21 15:36:11.857132663 +0000 UTC m=+713.392246552" Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.886578 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" 
podStartSLOduration=2.13758602 podStartE2EDuration="4.886547308s" podCreationTimestamp="2026-01-21 15:36:07 +0000 UTC" firstStartedPulling="2026-01-21 15:36:08.38490036 +0000 UTC m=+709.920014249" lastFinishedPulling="2026-01-21 15:36:11.133861648 +0000 UTC m=+712.668975537" observedRunningTime="2026-01-21 15:36:11.880520582 +0000 UTC m=+713.415634481" watchObservedRunningTime="2026-01-21 15:36:11.886547308 +0000 UTC m=+713.421661207" Jan 21 15:36:11 crc kubenswrapper[5021]: I0121 15:36:11.905974 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-g5kll" podStartSLOduration=2.246846521 podStartE2EDuration="4.905946216s" podCreationTimestamp="2026-01-21 15:36:07 +0000 UTC" firstStartedPulling="2026-01-21 15:36:08.476296875 +0000 UTC m=+710.011410764" lastFinishedPulling="2026-01-21 15:36:11.13539657 +0000 UTC m=+712.670510459" observedRunningTime="2026-01-21 15:36:11.903454517 +0000 UTC m=+713.438568406" watchObservedRunningTime="2026-01-21 15:36:11.905946216 +0000 UTC m=+713.441060115" Jan 21 15:36:13 crc kubenswrapper[5021]: I0121 15:36:13.812508 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" event={"ID":"2b5cda33-cd29-42cc-a1ba-ef98996815d7","Type":"ContainerStarted","Data":"b01b49097e6220d5b7cca7143b22342d752d8aa6e0d9c5f5a7b38ce182891fdc"} Jan 21 15:36:13 crc kubenswrapper[5021]: I0121 15:36:13.833568 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-xdczs" podStartSLOduration=1.6651700520000001 podStartE2EDuration="6.833549547s" podCreationTimestamp="2026-01-21 15:36:07 +0000 UTC" firstStartedPulling="2026-01-21 15:36:08.32969318 +0000 UTC m=+709.864807069" lastFinishedPulling="2026-01-21 15:36:13.498072655 +0000 UTC m=+715.033186564" observedRunningTime="2026-01-21 15:36:13.831557662 +0000 UTC m=+715.366671571" watchObservedRunningTime="2026-01-21 15:36:13.833549547 +0000 UTC m=+715.368663446" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.135711 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-5mp2z" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.361338 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.361396 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.365451 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.856504 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6b648576cb-b5qg6" Jan 21 15:36:18 crc kubenswrapper[5021]: I0121 15:36:18.932759 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:36:28 crc kubenswrapper[5021]: I0121 15:36:28.075113 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-b8kws" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.356982 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.357598 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.562310 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c"] Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.563425 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.565394 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.571180 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c"] Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.736011 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.736158 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmk7r\" (UniqueName: \"kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.736241 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.836995 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmk7r\" (UniqueName: \"kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.837235 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle\") pod 
\"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.837325 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.837985 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.838048 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.855726 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmk7r\" (UniqueName: \"kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:42 crc kubenswrapper[5021]: I0121 15:36:42.884899 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:43 crc kubenswrapper[5021]: I0121 15:36:43.292514 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c"] Jan 21 15:36:43 crc kubenswrapper[5021]: W0121 15:36:43.298541 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55ddc891_1df3_4fbb_8491_48d0bebf2b65.slice/crio-9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796 WatchSource:0}: Error finding container 9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796: Status 404 returned error can't find the container with id 9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796 Jan 21 15:36:43 crc kubenswrapper[5021]: I0121 15:36:43.978267 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-92qbd" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" containerID="cri-o://2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7" gracePeriod=15 Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.009740 5021 generic.go:334] "Generic (PLEG): container finished" podID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerID="26280c5a55352be19e757eb158cc5977f2b22a64e2fae87a62e60e755ad6886d" exitCode=0 Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.009783 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" event={"ID":"55ddc891-1df3-4fbb-8491-48d0bebf2b65","Type":"ContainerDied","Data":"26280c5a55352be19e757eb158cc5977f2b22a64e2fae87a62e60e755ad6886d"} Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.009807 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" event={"ID":"55ddc891-1df3-4fbb-8491-48d0bebf2b65","Type":"ContainerStarted","Data":"9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796"} Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.359767 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-92qbd_d4b323b2-0188-4e04-ab45-bb9689a750a2/console/0.log" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.359832 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.559955 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2s99\" (UniqueName: \"kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560007 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560053 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560095 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560158 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560188 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560224 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config\") pod \"d4b323b2-0188-4e04-ab45-bb9689a750a2\" (UID: \"d4b323b2-0188-4e04-ab45-bb9689a750a2\") " Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560770 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config" (OuterVolumeSpecName: "console-config") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560788 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560825 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca" (OuterVolumeSpecName: "service-ca") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.560937 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.565919 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.566280 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.566348 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99" (OuterVolumeSpecName: "kube-api-access-t2s99") pod "d4b323b2-0188-4e04-ab45-bb9689a750a2" (UID: "d4b323b2-0188-4e04-ab45-bb9689a750a2"). InnerVolumeSpecName "kube-api-access-t2s99". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.661893 5021 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.661966 5021 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.661977 5021 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.662023 5021 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-service-ca\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.662036 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2s99\" (UniqueName: \"kubernetes.io/projected/d4b323b2-0188-4e04-ab45-bb9689a750a2-kube-api-access-t2s99\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.662050 5021 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d4b323b2-0188-4e04-ab45-bb9689a750a2-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:44 crc kubenswrapper[5021]: I0121 15:36:44.662061 5021 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4b323b2-0188-4e04-ab45-bb9689a750a2-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.018782 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-92qbd_d4b323b2-0188-4e04-ab45-bb9689a750a2/console/0.log" Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.019451 5021 generic.go:334] "Generic (PLEG): container finished" podID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerID="2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7" exitCode=2 Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.019502 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-92qbd" event={"ID":"d4b323b2-0188-4e04-ab45-bb9689a750a2","Type":"ContainerDied","Data":"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7"} Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.019534 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-92qbd" Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.019562 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-92qbd" event={"ID":"d4b323b2-0188-4e04-ab45-bb9689a750a2","Type":"ContainerDied","Data":"9161386e90c503d545158086df6416a26900a7b5a48279c642d77dda68eb4382"} Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.019598 5021 scope.go:117] "RemoveContainer" containerID="2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7" Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.044511 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.049354 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-92qbd"] Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.080026 5021 scope.go:117] "RemoveContainer" containerID="2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7" Jan 21 15:36:45 crc kubenswrapper[5021]: E0121 15:36:45.080872 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7\": container with ID starting with 2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7 not found: ID does not exist" containerID="2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7" Jan 21 15:36:45 crc kubenswrapper[5021]: I0121 15:36:45.080960 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7"} err="failed to get container status \"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7\": rpc error: code = NotFound desc = could not find container \"2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7\": container with ID starting with 2b742edb00546a05f5628be1a569f7182f553af3d1f5b8358c042a4ddf44a0f7 not found: ID does not exist" Jan 21 15:36:46 crc kubenswrapper[5021]: I0121 15:36:46.043991 5021 generic.go:334] "Generic (PLEG): container finished" podID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerID="934bf21d5131414a051b57c41d4bde5ba73f3e0a50062e674fe58d960a394c1e" exitCode=0 Jan 21 15:36:46 crc kubenswrapper[5021]: I0121 15:36:46.044063 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" event={"ID":"55ddc891-1df3-4fbb-8491-48d0bebf2b65","Type":"ContainerDied","Data":"934bf21d5131414a051b57c41d4bde5ba73f3e0a50062e674fe58d960a394c1e"} Jan 21 15:36:46 crc kubenswrapper[5021]: I0121 15:36:46.745783 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" path="/var/lib/kubelet/pods/d4b323b2-0188-4e04-ab45-bb9689a750a2/volumes" Jan 21 15:36:47 crc kubenswrapper[5021]: I0121 15:36:47.052949 5021 generic.go:334] "Generic (PLEG): container finished" podID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerID="6f9dab1307693f7d5dcac877283ce4f0fc0dc881a59ec39e924355a0da673025" exitCode=0 Jan 21 15:36:47 crc kubenswrapper[5021]: I0121 15:36:47.052981 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" 
event={"ID":"55ddc891-1df3-4fbb-8491-48d0bebf2b65","Type":"ContainerDied","Data":"6f9dab1307693f7d5dcac877283ce4f0fc0dc881a59ec39e924355a0da673025"} Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.289854 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.412224 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle\") pod \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.412315 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmk7r\" (UniqueName: \"kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r\") pod \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.412381 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util\") pod \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\" (UID: \"55ddc891-1df3-4fbb-8491-48d0bebf2b65\") " Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.413491 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle" (OuterVolumeSpecName: "bundle") pod "55ddc891-1df3-4fbb-8491-48d0bebf2b65" (UID: "55ddc891-1df3-4fbb-8491-48d0bebf2b65"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.419077 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r" (OuterVolumeSpecName: "kube-api-access-tmk7r") pod "55ddc891-1df3-4fbb-8491-48d0bebf2b65" (UID: "55ddc891-1df3-4fbb-8491-48d0bebf2b65"). InnerVolumeSpecName "kube-api-access-tmk7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.439567 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util" (OuterVolumeSpecName: "util") pod "55ddc891-1df3-4fbb-8491-48d0bebf2b65" (UID: "55ddc891-1df3-4fbb-8491-48d0bebf2b65"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.514003 5021 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-util\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.514333 5021 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55ddc891-1df3-4fbb-8491-48d0bebf2b65-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:48 crc kubenswrapper[5021]: I0121 15:36:48.514413 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmk7r\" (UniqueName: \"kubernetes.io/projected/55ddc891-1df3-4fbb-8491-48d0bebf2b65-kube-api-access-tmk7r\") on node \"crc\" DevicePath \"\"" Jan 21 15:36:49 crc kubenswrapper[5021]: I0121 15:36:49.072947 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" event={"ID":"55ddc891-1df3-4fbb-8491-48d0bebf2b65","Type":"ContainerDied","Data":"9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796"} Jan 21 15:36:49 crc kubenswrapper[5021]: I0121 15:36:49.073518 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d175cae6668119c807032549e05bb80f77d49ac5411fd593f37c3dea6345796" Jan 21 15:36:49 crc kubenswrapper[5021]: I0121 15:36:49.073050 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.857343 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6"] Jan 21 15:36:57 crc kubenswrapper[5021]: E0121 15:36:57.858648 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="extract" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858669 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="extract" Jan 21 15:36:57 crc kubenswrapper[5021]: E0121 15:36:57.858681 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="pull" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858690 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="pull" Jan 21 15:36:57 crc kubenswrapper[5021]: E0121 15:36:57.858714 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858724 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" Jan 21 15:36:57 crc kubenswrapper[5021]: E0121 15:36:57.858740 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="util" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858748 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="util" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858927 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="55ddc891-1df3-4fbb-8491-48d0bebf2b65" containerName="extract" Jan 
21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.858954 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4b323b2-0188-4e04-ab45-bb9689a750a2" containerName="console" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.859633 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.861483 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-4r6lj" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.861595 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.862525 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.862744 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.864408 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 21 15:36:57 crc kubenswrapper[5021]: I0121 15:36:57.876211 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6"] Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.047530 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4ntw\" (UniqueName: \"kubernetes.io/projected/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-kube-api-access-w4ntw\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.047587 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-apiservice-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.047668 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-webhook-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.149281 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-webhook-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.149422 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4ntw\" (UniqueName: 
\"kubernetes.io/projected/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-kube-api-access-w4ntw\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.149481 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-apiservice-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.162453 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-webhook-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.177499 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4ntw\" (UniqueName: \"kubernetes.io/projected/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-kube-api-access-w4ntw\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.178400 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed-apiservice-cert\") pod \"metallb-operator-controller-manager-5c75d9c54f-p7sb6\" (UID: \"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed\") " pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.185317 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.223017 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc"] Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.223895 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.232588 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.232610 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.233202 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-nvjvp" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.251154 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-webhook-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.251200 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-apiservice-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.251245 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l992l\" (UniqueName: \"kubernetes.io/projected/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-kube-api-access-l992l\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.260150 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc"] Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.351789 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-webhook-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.351828 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-apiservice-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.351872 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l992l\" (UniqueName: \"kubernetes.io/projected/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-kube-api-access-l992l\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 
15:36:58.363517 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-apiservice-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.363554 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-webhook-cert\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.370595 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l992l\" (UniqueName: \"kubernetes.io/projected/3776b291-b33a-4d8e-bab3-cdd3a4f346e2-kube-api-access-l992l\") pod \"metallb-operator-webhook-server-7449c97c5b-q6gvc\" (UID: \"3776b291-b33a-4d8e-bab3-cdd3a4f346e2\") " pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.514492 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6"] Jan 21 15:36:58 crc kubenswrapper[5021]: W0121 15:36:58.525645 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a27e7e2_ee2e_4d6f_b0da_c814ebc2b1ed.slice/crio-86f14847a4d935a751b96f40d39eb608e9f8edc5df3229bc52bfad4a8800f969 WatchSource:0}: Error finding container 86f14847a4d935a751b96f40d39eb608e9f8edc5df3229bc52bfad4a8800f969: Status 404 returned error can't find the container with id 86f14847a4d935a751b96f40d39eb608e9f8edc5df3229bc52bfad4a8800f969 Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.595472 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:36:58 crc kubenswrapper[5021]: I0121 15:36:58.842829 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc"] Jan 21 15:36:58 crc kubenswrapper[5021]: W0121 15:36:58.848020 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3776b291_b33a_4d8e_bab3_cdd3a4f346e2.slice/crio-d793773fd4068cb6d1a3466c2d701f3c0fb5bf1f3bf4a6c143f49acbbb5039d8 WatchSource:0}: Error finding container d793773fd4068cb6d1a3466c2d701f3c0fb5bf1f3bf4a6c143f49acbbb5039d8: Status 404 returned error can't find the container with id d793773fd4068cb6d1a3466c2d701f3c0fb5bf1f3bf4a6c143f49acbbb5039d8 Jan 21 15:36:59 crc kubenswrapper[5021]: I0121 15:36:59.134943 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" event={"ID":"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed","Type":"ContainerStarted","Data":"86f14847a4d935a751b96f40d39eb608e9f8edc5df3229bc52bfad4a8800f969"} Jan 21 15:36:59 crc kubenswrapper[5021]: I0121 15:36:59.135990 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" event={"ID":"3776b291-b33a-4d8e-bab3-cdd3a4f346e2","Type":"ContainerStarted","Data":"d793773fd4068cb6d1a3466c2d701f3c0fb5bf1f3bf4a6c143f49acbbb5039d8"} Jan 21 15:37:02 crc kubenswrapper[5021]: I0121 15:37:02.158496 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" event={"ID":"9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed","Type":"ContainerStarted","Data":"b486eecbff51cd001beb1d90f105a09cf0e94a914e33600df23140f07fa15d87"} Jan 21 15:37:02 crc kubenswrapper[5021]: I0121 15:37:02.159091 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:37:03 crc kubenswrapper[5021]: I0121 15:37:03.767083 5021 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 21 15:37:04 crc kubenswrapper[5021]: I0121 15:37:04.172713 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" event={"ID":"3776b291-b33a-4d8e-bab3-cdd3a4f346e2","Type":"ContainerStarted","Data":"eb429d0f24697cddc224825fba7fa0288a7afb0fc0ad2449025fb498804fdc2c"} Jan 21 15:37:04 crc kubenswrapper[5021]: I0121 15:37:04.172966 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:37:04 crc kubenswrapper[5021]: I0121 15:37:04.197348 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" podStartSLOduration=4.36076492 podStartE2EDuration="7.197328875s" podCreationTimestamp="2026-01-21 15:36:57 +0000 UTC" firstStartedPulling="2026-01-21 15:36:58.528242104 +0000 UTC m=+760.063355993" lastFinishedPulling="2026-01-21 15:37:01.364806059 +0000 UTC m=+762.899919948" observedRunningTime="2026-01-21 15:37:02.185661197 +0000 UTC m=+763.720775086" watchObservedRunningTime="2026-01-21 15:37:04.197328875 +0000 UTC m=+765.732442764" Jan 21 15:37:04 crc kubenswrapper[5021]: I0121 15:37:04.198151 5021 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" podStartSLOduration=1.183493236 podStartE2EDuration="6.198146967s" podCreationTimestamp="2026-01-21 15:36:58 +0000 UTC" firstStartedPulling="2026-01-21 15:36:58.852689155 +0000 UTC m=+760.387803044" lastFinishedPulling="2026-01-21 15:37:03.867342886 +0000 UTC m=+765.402456775" observedRunningTime="2026-01-21 15:37:04.197502639 +0000 UTC m=+765.732616538" watchObservedRunningTime="2026-01-21 15:37:04.198146967 +0000 UTC m=+765.733260856" Jan 21 15:37:12 crc kubenswrapper[5021]: I0121 15:37:12.356982 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:37:12 crc kubenswrapper[5021]: I0121 15:37:12.357460 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:37:18 crc kubenswrapper[5021]: I0121 15:37:18.625491 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7449c97c5b-q6gvc" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.187416 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5c75d9c54f-p7sb6" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.860526 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-ld56k"] Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.863710 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.867619 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.867803 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-xcpvl" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.867889 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.869830 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf"] Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.870838 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.875113 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.894971 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf"] Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.928984 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929034 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhplj\" (UniqueName: \"kubernetes.io/projected/97b76490-a49c-4ddc-b6ba-7fbda7094851-kube-api-access-vhplj\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929067 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-conf\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929100 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-sockets\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929134 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-reloader\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929165 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swgpr\" (UniqueName: \"kubernetes.io/projected/64c81441-7b12-4e6d-9c22-08692b3e61e2-kube-api-access-swgpr\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929187 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-startup\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929367 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics-certs\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " 
pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.929482 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.955424 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-zpz26"] Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.956610 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-zpz26" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.958201 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.958626 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.959114 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.959350 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-tmq4z" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.960773 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-7k5j6"] Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.962747 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:38 crc kubenswrapper[5021]: I0121 15:37:38.964927 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.018871 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-7k5j6"] Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.031541 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-cert\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.031610 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-sockets\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.031641 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xg5h\" (UniqueName: \"kubernetes.io/projected/81ddebd8-1787-4b73-a670-11b1d0686123-kube-api-access-5xg5h\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.031691 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-reloader\") pod \"frr-k8s-ld56k\" (UID: 
\"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.031971 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032011 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swgpr\" (UniqueName: \"kubernetes.io/projected/64c81441-7b12-4e6d-9c22-08692b3e61e2-kube-api-access-swgpr\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032037 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-startup\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032062 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-metrics-certs\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032096 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/81ddebd8-1787-4b73-a670-11b1d0686123-metallb-excludel2\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032123 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics-certs\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032592 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-metrics-certs\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032731 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.036400 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.032614 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-sockets\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.036444 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnbgc\" (UniqueName: \"kubernetes.io/projected/1ed1517c-6c3c-48a1-8caf-aac692b8b088-kube-api-access-hnbgc\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037040 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-startup\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037091 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037139 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhplj\" (UniqueName: \"kubernetes.io/projected/97b76490-a49c-4ddc-b6ba-7fbda7094851-kube-api-access-vhplj\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.037199 5021 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037211 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-conf\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.037261 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert podName:64c81441-7b12-4e6d-9c22-08692b3e61e2 nodeName:}" failed. No retries permitted until 2026-01-21 15:37:39.537240865 +0000 UTC m=+801.072354754 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert") pod "frr-k8s-webhook-server-7df86c4f6c-cctjf" (UID: "64c81441-7b12-4e6d-9c22-08692b3e61e2") : secret "frr-k8s-webhook-server-cert" not found Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037581 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-reloader\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.037736 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/97b76490-a49c-4ddc-b6ba-7fbda7094851-frr-conf\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.044951 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97b76490-a49c-4ddc-b6ba-7fbda7094851-metrics-certs\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.053998 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swgpr\" (UniqueName: \"kubernetes.io/projected/64c81441-7b12-4e6d-9c22-08692b3e61e2-kube-api-access-swgpr\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.057022 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhplj\" (UniqueName: \"kubernetes.io/projected/97b76490-a49c-4ddc-b6ba-7fbda7094851-kube-api-access-vhplj\") pod \"frr-k8s-ld56k\" (UID: \"97b76490-a49c-4ddc-b6ba-7fbda7094851\") " pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138707 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-metrics-certs\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138783 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/81ddebd8-1787-4b73-a670-11b1d0686123-metallb-excludel2\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138816 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-metrics-certs\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138852 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnbgc\" (UniqueName: \"kubernetes.io/projected/1ed1517c-6c3c-48a1-8caf-aac692b8b088-kube-api-access-hnbgc\") pod \"controller-6968d8fdc4-7k5j6\" (UID: 
\"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138925 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-cert\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138946 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xg5h\" (UniqueName: \"kubernetes.io/projected/81ddebd8-1787-4b73-a670-11b1d0686123-kube-api-access-5xg5h\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.138983 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.139113 5021 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.139170 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist podName:81ddebd8-1787-4b73-a670-11b1d0686123 nodeName:}" failed. No retries permitted until 2026-01-21 15:37:39.639149381 +0000 UTC m=+801.174263270 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist") pod "speaker-zpz26" (UID: "81ddebd8-1787-4b73-a670-11b1d0686123") : secret "metallb-memberlist" not found Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.142096 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/81ddebd8-1787-4b73-a670-11b1d0686123-metallb-excludel2\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.142551 5021 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.148467 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-metrics-certs\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.153277 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1ed1517c-6c3c-48a1-8caf-aac692b8b088-cert\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.156171 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-metrics-certs\") pod \"speaker-zpz26\" (UID: 
\"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.160151 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xg5h\" (UniqueName: \"kubernetes.io/projected/81ddebd8-1787-4b73-a670-11b1d0686123-kube-api-access-5xg5h\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.164555 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnbgc\" (UniqueName: \"kubernetes.io/projected/1ed1517c-6c3c-48a1-8caf-aac692b8b088-kube-api-access-hnbgc\") pod \"controller-6968d8fdc4-7k5j6\" (UID: \"1ed1517c-6c3c-48a1-8caf-aac692b8b088\") " pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.185889 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.307412 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.381488 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"2745bc73a7a61e51ce2a4ffae0578beec439c262ca7332fb92fbf4274bf72b93"} Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.545756 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.552809 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64c81441-7b12-4e6d-9c22-08692b3e61e2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-cctjf\" (UID: \"64c81441-7b12-4e6d-9c22-08692b3e61e2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.602877 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-7k5j6"] Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.647727 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.647971 5021 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 21 15:37:39 crc kubenswrapper[5021]: E0121 15:37:39.648079 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist podName:81ddebd8-1787-4b73-a670-11b1d0686123 nodeName:}" failed. No retries permitted until 2026-01-21 15:37:40.648055071 +0000 UTC m=+802.183168950 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist") pod "speaker-zpz26" (UID: "81ddebd8-1787-4b73-a670-11b1d0686123") : secret "metallb-memberlist" not found Jan 21 15:37:39 crc kubenswrapper[5021]: I0121 15:37:39.801271 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.089391 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf"] Jan 21 15:37:40 crc kubenswrapper[5021]: W0121 15:37:40.100072 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64c81441_7b12_4e6d_9c22_08692b3e61e2.slice/crio-fa93e5ba91b8e0973244566fc1ffc1a59acfb5ce5d131db62d150f3b13f0d362 WatchSource:0}: Error finding container fa93e5ba91b8e0973244566fc1ffc1a59acfb5ce5d131db62d150f3b13f0d362: Status 404 returned error can't find the container with id fa93e5ba91b8e0973244566fc1ffc1a59acfb5ce5d131db62d150f3b13f0d362 Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.387569 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" event={"ID":"64c81441-7b12-4e6d-9c22-08692b3e61e2","Type":"ContainerStarted","Data":"fa93e5ba91b8e0973244566fc1ffc1a59acfb5ce5d131db62d150f3b13f0d362"} Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.390075 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7k5j6" event={"ID":"1ed1517c-6c3c-48a1-8caf-aac692b8b088","Type":"ContainerStarted","Data":"25bd63531557045dd6fe27743d21b9a3447c5fea5d0dc3e1bf6e8547c9847512"} Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.390128 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7k5j6" event={"ID":"1ed1517c-6c3c-48a1-8caf-aac692b8b088","Type":"ContainerStarted","Data":"823f2dba5c9a42b4570912d7bfa17cec7ef088dcf180dc3df8c13606bb08967f"} Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.390143 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7k5j6" event={"ID":"1ed1517c-6c3c-48a1-8caf-aac692b8b088","Type":"ContainerStarted","Data":"0f556e49d5420d905a8abd851cac2004cba1714ad2343c83a4e0e0f06aa3f3eb"} Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.390296 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.667139 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.681965 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/81ddebd8-1787-4b73-a670-11b1d0686123-memberlist\") pod \"speaker-zpz26\" (UID: \"81ddebd8-1787-4b73-a670-11b1d0686123\") " pod="metallb-system/speaker-zpz26" Jan 21 15:37:40 crc kubenswrapper[5021]: I0121 15:37:40.774242 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-zpz26" Jan 21 15:37:41 crc kubenswrapper[5021]: I0121 15:37:41.396362 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zpz26" event={"ID":"81ddebd8-1787-4b73-a670-11b1d0686123","Type":"ContainerStarted","Data":"299ed339a4fc6c6824db03e68eae51553ccf486adebde541d9ab57d4aeb98bf8"} Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.358499 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.358924 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.358974 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.359588 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.359646 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03" gracePeriod=600 Jan 21 15:37:42 crc kubenswrapper[5021]: I0121 15:37:42.405870 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zpz26" event={"ID":"81ddebd8-1787-4b73-a670-11b1d0686123","Type":"ContainerStarted","Data":"edae58f1ec7920b245088fa0d551483a831511a52f2df53413aa88095ceecb2f"} Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.415063 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-zpz26" event={"ID":"81ddebd8-1787-4b73-a670-11b1d0686123","Type":"ContainerStarted","Data":"f3e1ccd22906e5f257a0de17cb2810164428eee2cca4df163f9c34dc5a8e5ed3"} Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.417774 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-zpz26" Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.421263 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03" exitCode=0 Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.421281 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03"} Jan 21 15:37:43 crc kubenswrapper[5021]: 
I0121 15:37:43.421333 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859"} Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.421355 5021 scope.go:117] "RemoveContainer" containerID="9223686c7e9f3e200942601b9f2ab70d47dcf8ddc5c6e0c3f6d57943b68fa733" Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.448468 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-zpz26" podStartSLOduration=5.448447573 podStartE2EDuration="5.448447573s" podCreationTimestamp="2026-01-21 15:37:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:37:43.445055874 +0000 UTC m=+804.980169783" watchObservedRunningTime="2026-01-21 15:37:43.448447573 +0000 UTC m=+804.983561462" Jan 21 15:37:43 crc kubenswrapper[5021]: I0121 15:37:43.455324 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-7k5j6" podStartSLOduration=5.455297421 podStartE2EDuration="5.455297421s" podCreationTimestamp="2026-01-21 15:37:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:37:40.410003744 +0000 UTC m=+801.945117633" watchObservedRunningTime="2026-01-21 15:37:43.455297421 +0000 UTC m=+804.990411320" Jan 21 15:37:48 crc kubenswrapper[5021]: I0121 15:37:48.462929 5021 generic.go:334] "Generic (PLEG): container finished" podID="97b76490-a49c-4ddc-b6ba-7fbda7094851" containerID="11f26ddb20b4f26ec17bfa24572eea54feed9df89e578bab99a2b56ccca95111" exitCode=0 Jan 21 15:37:48 crc kubenswrapper[5021]: I0121 15:37:48.463025 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerDied","Data":"11f26ddb20b4f26ec17bfa24572eea54feed9df89e578bab99a2b56ccca95111"} Jan 21 15:37:48 crc kubenswrapper[5021]: I0121 15:37:48.464753 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" event={"ID":"64c81441-7b12-4e6d-9c22-08692b3e61e2","Type":"ContainerStarted","Data":"2b5f66c8cfabec458443f3c2f48b952da2802f25d2ba42c72c4b9ea6b8600bca"} Jan 21 15:37:48 crc kubenswrapper[5021]: I0121 15:37:48.464863 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:37:49 crc kubenswrapper[5021]: I0121 15:37:49.312075 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-7k5j6" Jan 21 15:37:49 crc kubenswrapper[5021]: I0121 15:37:49.326785 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" podStartSLOduration=3.196598669 podStartE2EDuration="11.326768136s" podCreationTimestamp="2026-01-21 15:37:38 +0000 UTC" firstStartedPulling="2026-01-21 15:37:40.129149816 +0000 UTC m=+801.664263705" lastFinishedPulling="2026-01-21 15:37:48.259319283 +0000 UTC m=+809.794433172" observedRunningTime="2026-01-21 15:37:48.49907926 +0000 UTC m=+810.034193149" watchObservedRunningTime="2026-01-21 15:37:49.326768136 +0000 UTC m=+810.861882025" Jan 21 15:37:49 crc kubenswrapper[5021]: 
I0121 15:37:49.478086 5021 generic.go:334] "Generic (PLEG): container finished" podID="97b76490-a49c-4ddc-b6ba-7fbda7094851" containerID="c8a47c7cb320df216145721df5f7cf4407025f8617b721f68c6d921e5afa2b50" exitCode=0 Jan 21 15:37:49 crc kubenswrapper[5021]: I0121 15:37:49.478169 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerDied","Data":"c8a47c7cb320df216145721df5f7cf4407025f8617b721f68c6d921e5afa2b50"} Jan 21 15:37:50 crc kubenswrapper[5021]: I0121 15:37:50.489191 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"69ddae82fb1b21cdd8cc35880b3aef23a902a1c96342987aff9766667279119e"} Jan 21 15:37:51 crc kubenswrapper[5021]: I0121 15:37:51.497515 5021 generic.go:334] "Generic (PLEG): container finished" podID="97b76490-a49c-4ddc-b6ba-7fbda7094851" containerID="69ddae82fb1b21cdd8cc35880b3aef23a902a1c96342987aff9766667279119e" exitCode=0 Jan 21 15:37:51 crc kubenswrapper[5021]: I0121 15:37:51.497628 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerDied","Data":"69ddae82fb1b21cdd8cc35880b3aef23a902a1c96342987aff9766667279119e"} Jan 21 15:37:53 crc kubenswrapper[5021]: I0121 15:37:53.518651 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"635f3ccabb8b2e7e191640db6e064bebd84d8720eab67b282a32a13ce18a165a"} Jan 21 15:37:53 crc kubenswrapper[5021]: I0121 15:37:53.519085 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"03a23a4189ca9791c08f56ed22a0ad249bdece87e463cdfc1384297ec0cd1b1f"} Jan 21 15:37:53 crc kubenswrapper[5021]: I0121 15:37:53.519101 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"05588b568b3154152db84f4805c601054873d8448c00cda4f5c5404d0dde80ae"} Jan 21 15:37:54 crc kubenswrapper[5021]: I0121 15:37:54.534013 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"8a285a0e9f582a9c7237a549879e2c3d9c4c35418f6b49f267f891923abe9572"} Jan 21 15:37:54 crc kubenswrapper[5021]: I0121 15:37:54.534512 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"4a6465fad6910d064508e22ae41ccc7c0e37e91d9c2631321a9149898c7aa03d"} Jan 21 15:37:55 crc kubenswrapper[5021]: I0121 15:37:55.562463 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ld56k" event={"ID":"97b76490-a49c-4ddc-b6ba-7fbda7094851","Type":"ContainerStarted","Data":"de063f6604153bfa3d3105e39f6274501a8c96d2332ed3a4e35cf02f2de772b2"} Jan 21 15:37:55 crc kubenswrapper[5021]: I0121 15:37:55.563065 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:55 crc kubenswrapper[5021]: I0121 15:37:55.590567 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-ld56k" 
podStartSLOduration=8.667269631 podStartE2EDuration="17.590551173s" podCreationTimestamp="2026-01-21 15:37:38 +0000 UTC" firstStartedPulling="2026-01-21 15:37:39.320370633 +0000 UTC m=+800.855484522" lastFinishedPulling="2026-01-21 15:37:48.243652175 +0000 UTC m=+809.778766064" observedRunningTime="2026-01-21 15:37:55.586042096 +0000 UTC m=+817.121155985" watchObservedRunningTime="2026-01-21 15:37:55.590551173 +0000 UTC m=+817.125665062" Jan 21 15:37:59 crc kubenswrapper[5021]: I0121 15:37:59.186203 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:59 crc kubenswrapper[5021]: I0121 15:37:59.222420 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:37:59 crc kubenswrapper[5021]: I0121 15:37:59.809123 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-cctjf" Jan 21 15:38:00 crc kubenswrapper[5021]: I0121 15:38:00.779222 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-zpz26" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.238735 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4"] Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.240283 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.243274 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.263990 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4"] Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.287218 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.287320 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.287389 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx69m\" (UniqueName: \"kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.388932 5021 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.389016 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.389048 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx69m\" (UniqueName: \"kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.389565 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.389768 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.415363 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx69m\" (UniqueName: \"kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.556466 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:02 crc kubenswrapper[5021]: I0121 15:38:02.985478 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4"] Jan 21 15:38:02 crc kubenswrapper[5021]: W0121 15:38:02.991048 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd71db597_eb63_47d5_a55f_f3951b03ff6f.slice/crio-5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c WatchSource:0}: Error finding container 5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c: Status 404 returned error can't find the container with id 5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c Jan 21 15:38:03 crc kubenswrapper[5021]: I0121 15:38:03.618068 5021 generic.go:334] "Generic (PLEG): container finished" podID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerID="be49c9ef2bd793779ae531cbedb68c68d33a583de31a2e50b846e5ff331235a9" exitCode=0 Jan 21 15:38:03 crc kubenswrapper[5021]: I0121 15:38:03.618220 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" event={"ID":"d71db597-eb63-47d5-a55f-f3951b03ff6f","Type":"ContainerDied","Data":"be49c9ef2bd793779ae531cbedb68c68d33a583de31a2e50b846e5ff331235a9"} Jan 21 15:38:03 crc kubenswrapper[5021]: I0121 15:38:03.618431 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" event={"ID":"d71db597-eb63-47d5-a55f-f3951b03ff6f","Type":"ContainerStarted","Data":"5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c"} Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.593538 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"] Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.595838 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.608014 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"] Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.730076 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghtnd\" (UniqueName: \"kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.730141 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.730170 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.831847 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghtnd\" (UniqueName: \"kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.831904 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.831941 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.832592 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.832679 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.873964 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ghtnd\" (UniqueName: \"kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd\") pod \"redhat-operators-s9czd\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") " pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:04 crc kubenswrapper[5021]: I0121 15:38:04.937239 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:05 crc kubenswrapper[5021]: I0121 15:38:05.207406 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"] Jan 21 15:38:05 crc kubenswrapper[5021]: I0121 15:38:05.641977 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerStarted","Data":"6626102cacac127197e1bf788ada05577704e7c7cc2b44b98b555a583a67c55d"} Jan 21 15:38:06 crc kubenswrapper[5021]: I0121 15:38:06.651694 5021 generic.go:334] "Generic (PLEG): container finished" podID="66776b98-11bb-476c-a590-eff794626588" containerID="65dd59cd6a81b92ab4dbef2ff5534f818c1a41061868296e35b1a1f4ab368351" exitCode=0 Jan 21 15:38:06 crc kubenswrapper[5021]: I0121 15:38:06.651780 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerDied","Data":"65dd59cd6a81b92ab4dbef2ff5534f818c1a41061868296e35b1a1f4ab368351"} Jan 21 15:38:09 crc kubenswrapper[5021]: I0121 15:38:09.190701 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-ld56k" Jan 21 15:38:11 crc kubenswrapper[5021]: I0121 15:38:11.697952 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerStarted","Data":"84dcf7c2cdf76b72dd7f10db35fa7e657cfef47bd7f148b7318a41b515ea7af7"} Jan 21 15:38:11 crc kubenswrapper[5021]: I0121 15:38:11.702375 5021 generic.go:334] "Generic (PLEG): container finished" podID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerID="dcd4f61685bc4ef41f168250a7729a46b341a78c4e33b8a96255bd69defb1bee" exitCode=0 Jan 21 15:38:11 crc kubenswrapper[5021]: I0121 15:38:11.702425 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" event={"ID":"d71db597-eb63-47d5-a55f-f3951b03ff6f","Type":"ContainerDied","Data":"dcd4f61685bc4ef41f168250a7729a46b341a78c4e33b8a96255bd69defb1bee"} Jan 21 15:38:12 crc kubenswrapper[5021]: I0121 15:38:12.720878 5021 generic.go:334] "Generic (PLEG): container finished" podID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerID="a4fc8e557eef235b43cd37043a2d6d5204a578259ffae748b909cbc6a3546457" exitCode=0 Jan 21 15:38:12 crc kubenswrapper[5021]: I0121 15:38:12.720972 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" event={"ID":"d71db597-eb63-47d5-a55f-f3951b03ff6f","Type":"ContainerDied","Data":"a4fc8e557eef235b43cd37043a2d6d5204a578259ffae748b909cbc6a3546457"} Jan 21 15:38:12 crc kubenswrapper[5021]: I0121 15:38:12.723590 5021 generic.go:334] "Generic (PLEG): container finished" podID="66776b98-11bb-476c-a590-eff794626588" containerID="84dcf7c2cdf76b72dd7f10db35fa7e657cfef47bd7f148b7318a41b515ea7af7" exitCode=0 Jan 21 15:38:12 crc kubenswrapper[5021]: I0121 
15:38:12.723637 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerDied","Data":"84dcf7c2cdf76b72dd7f10db35fa7e657cfef47bd7f148b7318a41b515ea7af7"} Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.068545 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.166146 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle\") pod \"d71db597-eb63-47d5-a55f-f3951b03ff6f\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.166309 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx69m\" (UniqueName: \"kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m\") pod \"d71db597-eb63-47d5-a55f-f3951b03ff6f\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.166381 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util\") pod \"d71db597-eb63-47d5-a55f-f3951b03ff6f\" (UID: \"d71db597-eb63-47d5-a55f-f3951b03ff6f\") " Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.167322 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle" (OuterVolumeSpecName: "bundle") pod "d71db597-eb63-47d5-a55f-f3951b03ff6f" (UID: "d71db597-eb63-47d5-a55f-f3951b03ff6f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.173133 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m" (OuterVolumeSpecName: "kube-api-access-dx69m") pod "d71db597-eb63-47d5-a55f-f3951b03ff6f" (UID: "d71db597-eb63-47d5-a55f-f3951b03ff6f"). InnerVolumeSpecName "kube-api-access-dx69m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.179647 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util" (OuterVolumeSpecName: "util") pod "d71db597-eb63-47d5-a55f-f3951b03ff6f" (UID: "d71db597-eb63-47d5-a55f-f3951b03ff6f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.267847 5021 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.267887 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx69m\" (UniqueName: \"kubernetes.io/projected/d71db597-eb63-47d5-a55f-f3951b03ff6f-kube-api-access-dx69m\") on node \"crc\" DevicePath \"\"" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.267898 5021 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d71db597-eb63-47d5-a55f-f3951b03ff6f-util\") on node \"crc\" DevicePath \"\"" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.743376 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.754157 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4" event={"ID":"d71db597-eb63-47d5-a55f-f3951b03ff6f","Type":"ContainerDied","Data":"5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c"} Jan 21 15:38:14 crc kubenswrapper[5021]: I0121 15:38:14.754225 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fb4dd0fb579ba0fe3ad3512ef85e7162aa159285e02ccb1976a486d0ac6015c" Jan 21 15:38:16 crc kubenswrapper[5021]: I0121 15:38:16.757221 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerStarted","Data":"e669a79dad0e94d30cfdb4cbb1a9d6a1081445ac0facc705c51b70e2e17df82e"} Jan 21 15:38:16 crc kubenswrapper[5021]: I0121 15:38:16.781655 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s9czd" podStartSLOduration=3.61931868 podStartE2EDuration="12.781636099s" podCreationTimestamp="2026-01-21 15:38:04 +0000 UTC" firstStartedPulling="2026-01-21 15:38:06.653259869 +0000 UTC m=+828.188373758" lastFinishedPulling="2026-01-21 15:38:15.815577288 +0000 UTC m=+837.350691177" observedRunningTime="2026-01-21 15:38:16.777690156 +0000 UTC m=+838.312804065" watchObservedRunningTime="2026-01-21 15:38:16.781636099 +0000 UTC m=+838.316749988" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.249879 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn"] Jan 21 15:38:20 crc kubenswrapper[5021]: E0121 15:38:20.250771 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="util" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.250787 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="util" Jan 21 15:38:20 crc kubenswrapper[5021]: E0121 15:38:20.250798 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="pull" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.250806 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="pull" 
Jan 21 15:38:20 crc kubenswrapper[5021]: E0121 15:38:20.250819 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="extract" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.250827 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="extract" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.250983 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d71db597-eb63-47d5-a55f-f3951b03ff6f" containerName="extract" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.251525 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.256145 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.256466 5021 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-8cx4v" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.257116 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.294652 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn"] Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.357578 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c13363d2-2dd8-4055-9779-a5583e152a3b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.357655 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfnjr\" (UniqueName: \"kubernetes.io/projected/c13363d2-2dd8-4055-9779-a5583e152a3b-kube-api-access-sfnjr\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.459385 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c13363d2-2dd8-4055-9779-a5583e152a3b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.459478 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfnjr\" (UniqueName: \"kubernetes.io/projected/c13363d2-2dd8-4055-9779-a5583e152a3b-kube-api-access-sfnjr\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.460215 5021 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c13363d2-2dd8-4055-9779-a5583e152a3b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.483116 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfnjr\" (UniqueName: \"kubernetes.io/projected/c13363d2-2dd8-4055-9779-a5583e152a3b-kube-api-access-sfnjr\") pod \"cert-manager-operator-controller-manager-64cf6dff88-p8xwn\" (UID: \"c13363d2-2dd8-4055-9779-a5583e152a3b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:20 crc kubenswrapper[5021]: I0121 15:38:20.602575 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" Jan 21 15:38:21 crc kubenswrapper[5021]: I0121 15:38:21.071541 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn"] Jan 21 15:38:21 crc kubenswrapper[5021]: I0121 15:38:21.786941 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" event={"ID":"c13363d2-2dd8-4055-9779-a5583e152a3b","Type":"ContainerStarted","Data":"55a989395cb565dd7e03d52f16b785b00f00cef5f8641960e606c3fb1a946240"} Jan 21 15:38:24 crc kubenswrapper[5021]: I0121 15:38:24.937697 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:24 crc kubenswrapper[5021]: I0121 15:38:24.938119 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:25 crc kubenswrapper[5021]: I0121 15:38:25.005514 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:25 crc kubenswrapper[5021]: I0121 15:38:25.854474 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s9czd" Jan 21 15:38:27 crc kubenswrapper[5021]: I0121 15:38:27.391079 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"] Jan 21 15:38:27 crc kubenswrapper[5021]: I0121 15:38:27.829895 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s9czd" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="registry-server" containerID="cri-o://e669a79dad0e94d30cfdb4cbb1a9d6a1081445ac0facc705c51b70e2e17df82e" gracePeriod=2 Jan 21 15:38:28 crc kubenswrapper[5021]: I0121 15:38:28.839062 5021 generic.go:334] "Generic (PLEG): container finished" podID="66776b98-11bb-476c-a590-eff794626588" containerID="e669a79dad0e94d30cfdb4cbb1a9d6a1081445ac0facc705c51b70e2e17df82e" exitCode=0 Jan 21 15:38:28 crc kubenswrapper[5021]: I0121 15:38:28.839140 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerDied","Data":"e669a79dad0e94d30cfdb4cbb1a9d6a1081445ac0facc705c51b70e2e17df82e"} Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.681090 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.730751 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities\") pod \"66776b98-11bb-476c-a590-eff794626588\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") "
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.730795 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content\") pod \"66776b98-11bb-476c-a590-eff794626588\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") "
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.730847 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghtnd\" (UniqueName: \"kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd\") pod \"66776b98-11bb-476c-a590-eff794626588\" (UID: \"66776b98-11bb-476c-a590-eff794626588\") "
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.736579 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd" (OuterVolumeSpecName: "kube-api-access-ghtnd") pod "66776b98-11bb-476c-a590-eff794626588" (UID: "66776b98-11bb-476c-a590-eff794626588"). InnerVolumeSpecName "kube-api-access-ghtnd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.742035 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities" (OuterVolumeSpecName: "utilities") pod "66776b98-11bb-476c-a590-eff794626588" (UID: "66776b98-11bb-476c-a590-eff794626588"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.832821 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.832868 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghtnd\" (UniqueName: \"kubernetes.io/projected/66776b98-11bb-476c-a590-eff794626588-kube-api-access-ghtnd\") on node \"crc\" DevicePath \"\""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.857682 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66776b98-11bb-476c-a590-eff794626588" (UID: "66776b98-11bb-476c-a590-eff794626588"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.872249 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9czd"
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.872240 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9czd" event={"ID":"66776b98-11bb-476c-a590-eff794626588","Type":"ContainerDied","Data":"6626102cacac127197e1bf788ada05577704e7c7cc2b44b98b555a583a67c55d"}
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.872413 5021 scope.go:117] "RemoveContainer" containerID="e669a79dad0e94d30cfdb4cbb1a9d6a1081445ac0facc705c51b70e2e17df82e"
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.878638 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" event={"ID":"c13363d2-2dd8-4055-9779-a5583e152a3b","Type":"ContainerStarted","Data":"9f9331ceb184c179931e46f0304151338cc754bdd7141090fab7ada69f42c40d"}
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.905350 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-p8xwn" podStartSLOduration=1.5254572720000001 podStartE2EDuration="12.905327613s" podCreationTimestamp="2026-01-21 15:38:20 +0000 UTC" firstStartedPulling="2026-01-21 15:38:21.083270751 +0000 UTC m=+842.618384640" lastFinishedPulling="2026-01-21 15:38:32.463141092 +0000 UTC m=+853.998254981" observedRunningTime="2026-01-21 15:38:32.899716497 +0000 UTC m=+854.434830396" watchObservedRunningTime="2026-01-21 15:38:32.905327613 +0000 UTC m=+854.440441502"
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.905710 5021 scope.go:117] "RemoveContainer" containerID="84dcf7c2cdf76b72dd7f10db35fa7e657cfef47bd7f148b7318a41b515ea7af7"
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.934343 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66776b98-11bb-476c-a590-eff794626588-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.937266 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"]
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.943328 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s9czd"]
Jan 21 15:38:32 crc kubenswrapper[5021]: I0121 15:38:32.952112 5021 scope.go:117] "RemoveContainer" containerID="65dd59cd6a81b92ab4dbef2ff5534f818c1a41061868296e35b1a1f4ab368351"
Jan 21 15:38:34 crc kubenswrapper[5021]: I0121 15:38:34.746474 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66776b98-11bb-476c-a590-eff794626588" path="/var/lib/kubelet/pods/66776b98-11bb-476c-a590-eff794626588/volumes"
Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.198120 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-chdrq"]
Jan 21 15:38:36 crc kubenswrapper[5021]: E0121 15:38:36.198628 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="extract-content"
Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.198647 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="extract-content"
Jan 21 15:38:36 crc kubenswrapper[5021]: E0121 15:38:36.198664 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="extract-utilities"
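The pod_startup_latency_tracker entry for cert-manager-operator-controller-manager-64cf6dff88-p8xwn above (15:38:32.905350) is internally consistent: podStartE2EDuration is observedRunningTime minus podCreationTimestamp (12.905327613s), and podStartSLOduration appears to be that figure minus the image-pull window, since lastFinishedPulling - firstStartedPulling = 853.998254981 - 842.618384640 = 11.379870341s by the monotonic m=+ offsets, and 12.905327613 - 11.379870341 = 1.525457272, matching the logged 1.5254572720000001 up to float formatting. A small Go sketch of that arithmetic, with the constants copied from the entry:

    // Reproduces the SLO-duration arithmetic of the entry above using the
    // monotonic m=+ offsets it prints. This is a reading of the logged
    // numbers, not kubelet code.
    package main

    import "fmt"

    func main() {
        const (
            firstStartedPulling = 842.618384640 // m=+ offset, in seconds
            lastFinishedPulling = 853.998254981
            podStartE2E         = 12.905327613 // observedRunningTime - podCreationTimestamp
        )
        pull := lastFinishedPulling - firstStartedPulling
        slo := podStartE2E - pull // image-pull time is excluded from the SLO figure
        fmt.Printf("pull=%.9fs slo=%.9fs\n", pull, slo)
        // pull=11.379870341s slo=1.525457272s
    }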
podUID="66776b98-11bb-476c-a590-eff794626588" containerName="extract-utilities" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.198672 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="extract-utilities" Jan 21 15:38:36 crc kubenswrapper[5021]: E0121 15:38:36.198693 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="registry-server" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.198702 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="registry-server" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.198850 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="66776b98-11bb-476c-a590-eff794626588" containerName="registry-server" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.199487 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.202113 5021 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-vbv66" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.202258 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.202285 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.214064 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-chdrq"] Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.280712 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.280793 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8sl6\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-kube-api-access-d8sl6\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.382485 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.382573 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8sl6\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-kube-api-access-d8sl6\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.401824 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.404158 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8sl6\" (UniqueName: \"kubernetes.io/projected/f1b5f0fa-4a66-443b-9178-8f51aee84d1f-kube-api-access-d8sl6\") pod \"cert-manager-webhook-f4fb5df64-chdrq\" (UID: \"f1b5f0fa-4a66-443b-9178-8f51aee84d1f\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.522612 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" Jan 21 15:38:36 crc kubenswrapper[5021]: I0121 15:38:36.928544 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-chdrq"] Jan 21 15:38:37 crc kubenswrapper[5021]: I0121 15:38:37.920710 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" event={"ID":"f1b5f0fa-4a66-443b-9178-8f51aee84d1f","Type":"ContainerStarted","Data":"e324ea469772c0f09e28a64fec8996a1b85e9615cb95111fd754060855f6e824"} Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.719373 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"] Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.720134 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.722379 5021 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-tbqnp" Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.739410 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"] Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.829830 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6fc31342-f27a-4bc1-8121-a283abb689fa-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-ds76b\" (UID: \"6fc31342-f27a-4bc1-8121-a283abb689fa\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.829985 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk82w\" (UniqueName: \"kubernetes.io/projected/6fc31342-f27a-4bc1-8121-a283abb689fa-kube-api-access-rk82w\") pod \"cert-manager-cainjector-855d9ccff4-ds76b\" (UID: \"6fc31342-f27a-4bc1-8121-a283abb689fa\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.930749 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6fc31342-f27a-4bc1-8121-a283abb689fa-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-ds76b\" (UID: \"6fc31342-f27a-4bc1-8121-a283abb689fa\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.930836 5021 
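Every volume in this log walks the same reconciler phases, visible again in the cert-manager-webhook and cert-manager-cainjector sequences above: operationExecutor.VerifyControllerAttachedVolume started, then operationExecutor.MountVolume started, then MountVolume.SetUp succeeded (with UnmountVolume started / UnmountVolume.TearDown succeeded / Volume detached on the way down). A self-contained Go sketch for pulling the mount-side transitions out of a saved copy of this log; the regular expression and output format are illustrative assumptions, not kubelet tooling:

    // Reads kubelet log lines on stdin and prints (volume, phase) pairs for
    // the three mount-side reconciler phases named above, e.g.:
    //   go run follow_volumes.go < kubelet.log
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        // Matches the phase phrase and the escaped-quoted volume name that
        // follows "for volume" in the log lines above.
        re := regexp.MustCompile(`(VerifyControllerAttachedVolume started|MountVolume started|MountVolume\.SetUp succeeded) for volume \\?"([^"\\]+)`)
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet lines can be long
        for sc.Scan() {
            if m := re.FindStringSubmatch(sc.Text()); m != nil {
                fmt.Printf("%-25s %s\n", m[2], m[1])
            }
        }
    }

For the kube-api-access-sfnjr volume above, for instance, this would print the three phases in order of appearance.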
Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.955956 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6fc31342-f27a-4bc1-8121-a283abb689fa-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-ds76b\" (UID: \"6fc31342-f27a-4bc1-8121-a283abb689fa\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"
Jan 21 15:38:39 crc kubenswrapper[5021]: I0121 15:38:39.968839 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk82w\" (UniqueName: \"kubernetes.io/projected/6fc31342-f27a-4bc1-8121-a283abb689fa-kube-api-access-rk82w\") pod \"cert-manager-cainjector-855d9ccff4-ds76b\" (UID: \"6fc31342-f27a-4bc1-8121-a283abb689fa\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"
Jan 21 15:38:40 crc kubenswrapper[5021]: I0121 15:38:40.077619 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"
Jan 21 15:38:40 crc kubenswrapper[5021]: I0121 15:38:40.308726 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-ds76b"]
Jan 21 15:38:40 crc kubenswrapper[5021]: W0121 15:38:40.315711 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fc31342_f27a_4bc1_8121_a283abb689fa.slice/crio-38c481ebdbcb7850021d083751cf23efafbdf5d166ce24544b3f0720f436cc6f WatchSource:0}: Error finding container 38c481ebdbcb7850021d083751cf23efafbdf5d166ce24544b3f0720f436cc6f: Status 404 returned error can't find the container with id 38c481ebdbcb7850021d083751cf23efafbdf5d166ce24544b3f0720f436cc6f
Jan 21 15:38:40 crc kubenswrapper[5021]: I0121 15:38:40.938236 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" event={"ID":"6fc31342-f27a-4bc1-8121-a283abb689fa","Type":"ContainerStarted","Data":"38c481ebdbcb7850021d083751cf23efafbdf5d166ce24544b3f0720f436cc6f"}
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.633236 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-sctx7"]
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.635019 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.641030 5021 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-xvvh8"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.649788 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-sctx7"]
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.711087 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz6rc\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-kube-api-access-nz6rc\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.711188 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-bound-sa-token\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.812184 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz6rc\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-kube-api-access-nz6rc\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.812245 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-bound-sa-token\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.840161 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-bound-sa-token\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.840302 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz6rc\" (UniqueName: \"kubernetes.io/projected/4e8ca7a2-523e-4c60-8032-020eed1d3acc-kube-api-access-nz6rc\") pod \"cert-manager-86cb77c54b-sctx7\" (UID: \"4e8ca7a2-523e-4c60-8032-020eed1d3acc\") " pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:49 crc kubenswrapper[5021]: I0121 15:38:49.960719 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-sctx7"
Jan 21 15:38:51 crc kubenswrapper[5021]: I0121 15:38:51.318117 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-sctx7"]
Jan 21 15:38:52 crc kubenswrapper[5021]: I0121 15:38:52.025503 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-sctx7" event={"ID":"4e8ca7a2-523e-4c60-8032-020eed1d3acc","Type":"ContainerStarted","Data":"1f618d7d5296e3fd1780ce6fa83f53a26d717b76c18eea936cd1b831d172ef9a"}
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.034172 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" event={"ID":"f1b5f0fa-4a66-443b-9178-8f51aee84d1f","Type":"ContainerStarted","Data":"6fa73c3b1acdef0f29a30260050ff7acd525ad4c82b0d1a3519debb134cc6fd0"}
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.035814 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-sctx7" event={"ID":"4e8ca7a2-523e-4c60-8032-020eed1d3acc","Type":"ContainerStarted","Data":"71aeee2ccb9bbb6e6e5363135d832f44f8090d304c1aac14b3a905ab48d112e0"}
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.037261 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" event={"ID":"6fc31342-f27a-4bc1-8121-a283abb689fa","Type":"ContainerStarted","Data":"213a46197bd0fcf4514217bfa113509714b52defcdea634642e6af41eb8b4583"}
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.060927 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq" podStartSLOduration=2.035337819 podStartE2EDuration="17.060892538s" podCreationTimestamp="2026-01-21 15:38:36 +0000 UTC" firstStartedPulling="2026-01-21 15:38:36.935855791 +0000 UTC m=+858.470969680" lastFinishedPulling="2026-01-21 15:38:51.96141051 +0000 UTC m=+873.496524399" observedRunningTime="2026-01-21 15:38:53.057176674 +0000 UTC m=+874.592290563" watchObservedRunningTime="2026-01-21 15:38:53.060892538 +0000 UTC m=+874.596006417"
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.078661 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-sctx7" podStartSLOduration=4.078639452 podStartE2EDuration="4.078639452s" podCreationTimestamp="2026-01-21 15:38:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:38:53.073612502 +0000 UTC m=+874.608726391" watchObservedRunningTime="2026-01-21 15:38:53.078639452 +0000 UTC m=+874.613753341"
Jan 21 15:38:53 crc kubenswrapper[5021]: I0121 15:38:53.133554 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-ds76b" podStartSLOduration=2.452830036 podStartE2EDuration="14.13352037s" podCreationTimestamp="2026-01-21 15:38:39 +0000 UTC" firstStartedPulling="2026-01-21 15:38:40.319057643 +0000 UTC m=+861.854171532" lastFinishedPulling="2026-01-21 15:38:51.999747977 +0000 UTC m=+873.534861866" observedRunningTime="2026-01-21 15:38:53.121176616 +0000 UTC m=+874.656290505" watchObservedRunningTime="2026-01-21 15:38:53.13352037 +0000 UTC m=+874.668634259"
Jan 21 15:38:54 crc kubenswrapper[5021]: I0121 15:38:54.043132 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq"
Jan 21 15:39:01 crc kubenswrapper[5021]: I0121 15:39:01.528186 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-chdrq"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.609467 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.611248 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.613797 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.614079 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-tkw6n"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.614287 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.638688 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.746842 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdwt5\" (UniqueName: \"kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5\") pod \"openstack-operator-index-jjxh2\" (UID: \"6a8438d2-37ac-4538-83f8-caa11009999e\") " pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.848494 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdwt5\" (UniqueName: \"kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5\") pod \"openstack-operator-index-jjxh2\" (UID: \"6a8438d2-37ac-4538-83f8-caa11009999e\") " pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.870558 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdwt5\" (UniqueName: \"kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5\") pod \"openstack-operator-index-jjxh2\" (UID: \"6a8438d2-37ac-4538-83f8-caa11009999e\") " pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:04.938066 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:05 crc kubenswrapper[5021]: I0121 15:39:05.458306 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:06 crc kubenswrapper[5021]: I0121 15:39:06.136923 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jjxh2" event={"ID":"6a8438d2-37ac-4538-83f8-caa11009999e","Type":"ContainerStarted","Data":"82de8ecd692349518e042c258225390df0ff61a4f525610aae3288af1cc3f7fd"}
Jan 21 15:39:08 crc kubenswrapper[5021]: I0121 15:39:08.150598 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jjxh2" event={"ID":"6a8438d2-37ac-4538-83f8-caa11009999e","Type":"ContainerStarted","Data":"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"}
Jan 21 15:39:08 crc kubenswrapper[5021]: I0121 15:39:08.169654 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-jjxh2" podStartSLOduration=1.883148815 podStartE2EDuration="4.169636198s" podCreationTimestamp="2026-01-21 15:39:04 +0000 UTC" firstStartedPulling="2026-01-21 15:39:05.463577964 +0000 UTC m=+886.998691853" lastFinishedPulling="2026-01-21 15:39:07.750065347 +0000 UTC m=+889.285179236" observedRunningTime="2026-01-21 15:39:08.168249019 +0000 UTC m=+889.703362898" watchObservedRunningTime="2026-01-21 15:39:08.169636198 +0000 UTC m=+889.704750087"
Jan 21 15:39:08 crc kubenswrapper[5021]: I0121 15:39:08.181990 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:08 crc kubenswrapper[5021]: I0121 15:39:08.987755 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-jwdkv"]
Jan 21 15:39:08 crc kubenswrapper[5021]: I0121 15:39:08.988960 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.009849 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jwdkv"]
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.022022 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5z77\" (UniqueName: \"kubernetes.io/projected/2c93cbb3-1ee5-47ca-a383-d460bf952648-kube-api-access-b5z77\") pod \"openstack-operator-index-jwdkv\" (UID: \"2c93cbb3-1ee5-47ca-a383-d460bf952648\") " pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.123133 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5z77\" (UniqueName: \"kubernetes.io/projected/2c93cbb3-1ee5-47ca-a383-d460bf952648-kube-api-access-b5z77\") pod \"openstack-operator-index-jwdkv\" (UID: \"2c93cbb3-1ee5-47ca-a383-d460bf952648\") " pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.143668 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5z77\" (UniqueName: \"kubernetes.io/projected/2c93cbb3-1ee5-47ca-a383-d460bf952648-kube-api-access-b5z77\") pod \"openstack-operator-index-jwdkv\" (UID: \"2c93cbb3-1ee5-47ca-a383-d460bf952648\") " pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.314834 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:09 crc kubenswrapper[5021]: I0121 15:39:09.514190 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jwdkv"]
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.164091 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jwdkv" event={"ID":"2c93cbb3-1ee5-47ca-a383-d460bf952648","Type":"ContainerStarted","Data":"19bf2a3bb78fad46f76af56ba0ab62414ef6a3c6519312d1d993ce3c0b01f086"}
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.164616 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jwdkv" event={"ID":"2c93cbb3-1ee5-47ca-a383-d460bf952648","Type":"ContainerStarted","Data":"c4b83cce7c46eed108262f1e4a98b99db6e8c79fd928047f404e719627941eaf"}
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.164402 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-jjxh2" podUID="6a8438d2-37ac-4538-83f8-caa11009999e" containerName="registry-server" containerID="cri-o://d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc" gracePeriod=2
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.185509 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-jwdkv" podStartSLOduration=1.781632844 podStartE2EDuration="2.185488967s" podCreationTimestamp="2026-01-21 15:39:08 +0000 UTC" firstStartedPulling="2026-01-21 15:39:09.525452992 +0000 UTC m=+891.060566881" lastFinishedPulling="2026-01-21 15:39:09.929309115 +0000 UTC m=+891.464423004" observedRunningTime="2026-01-21 15:39:10.183658375 +0000 UTC m=+891.718772264" watchObservedRunningTime="2026-01-21 15:39:10.185488967 +0000 UTC m=+891.720602856"
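One detail in the cert-manager-86cb77c54b-sctx7 startup entry above (15:38:53.078661): both pull timestamps are Go's zero time ("0001-01-01 00:00:00 +0000 UTC"), meaning no image pull was recorded, and podStartSLOduration equals podStartE2EDuration (4.078639452s for both). A tiny Go sketch of that zero-value case, assuming, as the other entries here suggest, that the SLO figure subtracts only a recorded pull window:

    // The zero-time case: with no recorded pull, the pull window contributes
    // nothing and the SLO duration equals the E2E duration, as in the
    // cert-manager-86cb77c54b-sctx7 entry above.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var firstStartedPulling, lastFinishedPulling time.Time // zero values, printed as 0001-01-01 in the log
        e2e := 4078639452 * time.Nanosecond                    // podStartE2EDuration="4.078639452s"

        pull := time.Duration(0)
        if !firstStartedPulling.IsZero() && !lastFinishedPulling.IsZero() {
            pull = lastFinishedPulling.Sub(firstStartedPulling)
        }
        fmt.Println("slo =", e2e-pull) // slo = 4.078639452s
    }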
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.562817 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.662557 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdwt5\" (UniqueName: \"kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5\") pod \"6a8438d2-37ac-4538-83f8-caa11009999e\" (UID: \"6a8438d2-37ac-4538-83f8-caa11009999e\") "
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.670129 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5" (OuterVolumeSpecName: "kube-api-access-hdwt5") pod "6a8438d2-37ac-4538-83f8-caa11009999e" (UID: "6a8438d2-37ac-4538-83f8-caa11009999e"). InnerVolumeSpecName "kube-api-access-hdwt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:39:10 crc kubenswrapper[5021]: I0121 15:39:10.764686 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdwt5\" (UniqueName: \"kubernetes.io/projected/6a8438d2-37ac-4538-83f8-caa11009999e-kube-api-access-hdwt5\") on node \"crc\" DevicePath \"\""
Jan 21 15:39:10 crc kubenswrapper[5021]: E0121 15:39:10.875572 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a8438d2_37ac_4538_83f8_caa11009999e.slice/crio-82de8ecd692349518e042c258225390df0ff61a4f525610aae3288af1cc3f7fd\": RecentStats: unable to find data in memory cache]"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.178046 5021 generic.go:334] "Generic (PLEG): container finished" podID="6a8438d2-37ac-4538-83f8-caa11009999e" containerID="d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc" exitCode=0
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.178157 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jjxh2" event={"ID":"6a8438d2-37ac-4538-83f8-caa11009999e","Type":"ContainerDied","Data":"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"}
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.178414 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jjxh2" event={"ID":"6a8438d2-37ac-4538-83f8-caa11009999e","Type":"ContainerDied","Data":"82de8ecd692349518e042c258225390df0ff61a4f525610aae3288af1cc3f7fd"}
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.178441 5021 scope.go:117] "RemoveContainer" containerID="d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.178240 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jjxh2"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.207405 5021 scope.go:117] "RemoveContainer" containerID="d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"
Jan 21 15:39:11 crc kubenswrapper[5021]: E0121 15:39:11.208101 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc\": container with ID starting with d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc not found: ID does not exist" containerID="d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.208152 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc"} err="failed to get container status \"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc\": rpc error: code = NotFound desc = could not find container \"d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc\": container with ID starting with d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc not found: ID does not exist"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.208190 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.214127 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-jjxh2"]
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.595191 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m25vd"]
Jan 21 15:39:11 crc kubenswrapper[5021]: E0121 15:39:11.595453 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a8438d2-37ac-4538-83f8-caa11009999e" containerName="registry-server"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.595463 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a8438d2-37ac-4538-83f8-caa11009999e" containerName="registry-server"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.595574 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a8438d2-37ac-4538-83f8-caa11009999e" containerName="registry-server"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.596483 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m25vd"
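The RemoveContainer / NotFound exchange above (15:39:11.207405 through 15:39:11.208152) is a benign race: the first RemoveContainer at 15:39:11.178441 had already deleted d9e7744a..., so the retry's status query comes back gRPC NotFound and the pod_container_deletor only logs the error instead of failing cleanup. A minimal Go sketch of the idempotent-delete check this implies; removeContainer is a hypothetical stand-in for the CRI call, and the example pulls in google.golang.org/grpc:

    // Treating gRPC NotFound from a container-removal path as "already gone"
    // rather than a failure -- the pattern suggested by the log lines above.
    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer simulates the runtime answering NotFound for an ID
    // that was already removed.
    func removeContainer(id string) error {
        return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    func main() {
        err := removeContainer("d9e7744a519d935d0c9f66928d344e77f2af461c15d77db97f4049ef96a5f7bc")
        if status.Code(err) == codes.NotFound {
            fmt.Println("container already removed; ignoring NotFound") // not a real failure
            return
        }
        if err != nil {
            panic(err)
        }
    }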
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.606205 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m25vd"]
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.681308 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.681394 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns92j\" (UniqueName: \"kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.681775 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.783103 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns92j\" (UniqueName: \"kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.783207 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.783243 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.783701 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.783780 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.808097 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns92j\" (UniqueName: \"kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j\") pod \"certified-operators-m25vd\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:11 crc kubenswrapper[5021]: I0121 15:39:11.922835 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m25vd"
Jan 21 15:39:12 crc kubenswrapper[5021]: I0121 15:39:12.460618 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m25vd"]
Jan 21 15:39:12 crc kubenswrapper[5021]: W0121 15:39:12.468698 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dc208ed_5c94_48b7_919c_89ad83b76459.slice/crio-b8af4c1373a2154d70a6ac21e0b892f2efcf09f3b464e875558413ddca9c6514 WatchSource:0}: Error finding container b8af4c1373a2154d70a6ac21e0b892f2efcf09f3b464e875558413ddca9c6514: Status 404 returned error can't find the container with id b8af4c1373a2154d70a6ac21e0b892f2efcf09f3b464e875558413ddca9c6514
Jan 21 15:39:12 crc kubenswrapper[5021]: I0121 15:39:12.746682 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a8438d2-37ac-4538-83f8-caa11009999e" path="/var/lib/kubelet/pods/6a8438d2-37ac-4538-83f8-caa11009999e/volumes"
Jan 21 15:39:13 crc kubenswrapper[5021]: I0121 15:39:13.197769 5021 generic.go:334] "Generic (PLEG): container finished" podID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerID="15cc96cc3ee9192d88ddd1f120c575e9b2eeb5f127e53f22867dc43174477823" exitCode=0
Jan 21 15:39:13 crc kubenswrapper[5021]: I0121 15:39:13.197823 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerDied","Data":"15cc96cc3ee9192d88ddd1f120c575e9b2eeb5f127e53f22867dc43174477823"}
Jan 21 15:39:13 crc kubenswrapper[5021]: I0121 15:39:13.197852 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerStarted","Data":"b8af4c1373a2154d70a6ac21e0b892f2efcf09f3b464e875558413ddca9c6514"}
Jan 21 15:39:14 crc kubenswrapper[5021]: I0121 15:39:14.205710 5021 generic.go:334] "Generic (PLEG): container finished" podID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerID="169d3f8874e0e84266c1de4b56327eec887516af611253fa5515ca7fae458149" exitCode=0
Jan 21 15:39:14 crc kubenswrapper[5021]: I0121 15:39:14.205763 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerDied","Data":"169d3f8874e0e84266c1de4b56327eec887516af611253fa5515ca7fae458149"}
Jan 21 15:39:19 crc kubenswrapper[5021]: I0121 15:39:19.315873 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:19 crc kubenswrapper[5021]: I0121 15:39:19.316923 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:19 crc kubenswrapper[5021]: I0121 15:39:19.350686 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:20 crc kubenswrapper[5021]: I0121 15:39:20.268087 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-jwdkv"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.031853 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"]
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.034030 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.037843 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-s9lzv"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.041284 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"]
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.151639 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.151714 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.151803 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxb26\" (UniqueName: \"kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.253682 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.253828 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxb26\" (UniqueName: \"kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.253871 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.254275 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.254297 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.277937 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxb26\" (UniqueName: \"kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26\") pod \"f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") " pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:22 crc kubenswrapper[5021]: I0121 15:39:22.358014 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:23 crc kubenswrapper[5021]: I0121 15:39:23.805585 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"]
Jan 21 15:39:23 crc kubenswrapper[5021]: W0121 15:39:23.817076 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb61e6e25_934e_4eb5_ba83_7aca994252fc.slice/crio-573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1 WatchSource:0}: Error finding container 573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1: Status 404 returned error can't find the container with id 573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1
Jan 21 15:39:24 crc kubenswrapper[5021]: I0121 15:39:24.272923 5021 generic.go:334] "Generic (PLEG): container finished" podID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerID="f972ac3e9a9418f3336360dc88ff8a1a39887d252269e8eceef701c8be020e7e" exitCode=0
Jan 21 15:39:24 crc kubenswrapper[5021]: I0121 15:39:24.273040 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerDied","Data":"f972ac3e9a9418f3336360dc88ff8a1a39887d252269e8eceef701c8be020e7e"}
Jan 21 15:39:24 crc kubenswrapper[5021]: I0121 15:39:24.273109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerStarted","Data":"573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1"}
Jan 21 15:39:24 crc kubenswrapper[5021]: I0121 15:39:24.276282 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerStarted","Data":"3ae63af29e36af76907b6cf16b0e245e3a6b48da075b769771242e010ec3f9da"}
Jan 21 15:39:24 crc kubenswrapper[5021]: I0121 15:39:24.321571 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m25vd" podStartSLOduration=3.128291331 podStartE2EDuration="13.321541189s" podCreationTimestamp="2026-01-21 15:39:11 +0000 UTC" firstStartedPulling="2026-01-21 15:39:13.201478049 +0000 UTC m=+894.736591948" lastFinishedPulling="2026-01-21 15:39:23.394727917 +0000 UTC m=+904.929841806" observedRunningTime="2026-01-21 15:39:24.31836643 +0000 UTC m=+905.853480319" watchObservedRunningTime="2026-01-21 15:39:24.321541189 +0000 UTC m=+905.856655078"
Jan 21 15:39:25 crc kubenswrapper[5021]: I0121 15:39:25.284599 5021 generic.go:334] "Generic (PLEG): container finished" podID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerID="38b63e50b44cbd78860874d4e62c0f4148a15c2c606b6d6953d156d56aab330c" exitCode=0
Jan 21 15:39:25 crc kubenswrapper[5021]: I0121 15:39:25.284727 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerDied","Data":"38b63e50b44cbd78860874d4e62c0f4148a15c2c606b6d6953d156d56aab330c"}
Jan 21 15:39:26 crc kubenswrapper[5021]: I0121 15:39:26.293201 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerStarted","Data":"38a6782ee6a62fd63da112988785530f4e1fa7c5f96d66a5cc9fe0f8f19c3851"}
Jan 21 15:39:26 crc kubenswrapper[5021]: I0121 15:39:26.310758 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" podStartSLOduration=4.032870679 podStartE2EDuration="4.310736275s" podCreationTimestamp="2026-01-21 15:39:22 +0000 UTC" firstStartedPulling="2026-01-21 15:39:24.275008283 +0000 UTC m=+905.810122172" lastFinishedPulling="2026-01-21 15:39:24.552873879 +0000 UTC m=+906.087987768" observedRunningTime="2026-01-21 15:39:26.306554429 +0000 UTC m=+907.841668348" watchObservedRunningTime="2026-01-21 15:39:26.310736275 +0000 UTC m=+907.845850154"
Jan 21 15:39:27 crc kubenswrapper[5021]: I0121 15:39:27.300863 5021 generic.go:334] "Generic (PLEG): container finished" podID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerID="38a6782ee6a62fd63da112988785530f4e1fa7c5f96d66a5cc9fe0f8f19c3851" exitCode=0
Jan 21 15:39:27 crc kubenswrapper[5021]: I0121 15:39:27.300963 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerDied","Data":"38a6782ee6a62fd63da112988785530f4e1fa7c5f96d66a5cc9fe0f8f19c3851"}
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.548428 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.656637 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxb26\" (UniqueName: \"kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26\") pod \"b61e6e25-934e-4eb5-ba83-7aca994252fc\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") "
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.656709 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle\") pod \"b61e6e25-934e-4eb5-ba83-7aca994252fc\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") "
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.656743 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util\") pod \"b61e6e25-934e-4eb5-ba83-7aca994252fc\" (UID: \"b61e6e25-934e-4eb5-ba83-7aca994252fc\") "
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.657399 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle" (OuterVolumeSpecName: "bundle") pod "b61e6e25-934e-4eb5-ba83-7aca994252fc" (UID: "b61e6e25-934e-4eb5-ba83-7aca994252fc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.662301 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26" (OuterVolumeSpecName: "kube-api-access-pxb26") pod "b61e6e25-934e-4eb5-ba83-7aca994252fc" (UID: "b61e6e25-934e-4eb5-ba83-7aca994252fc"). InnerVolumeSpecName "kube-api-access-pxb26". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.670799 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util" (OuterVolumeSpecName: "util") pod "b61e6e25-934e-4eb5-ba83-7aca994252fc" (UID: "b61e6e25-934e-4eb5-ba83-7aca994252fc"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.759854 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxb26\" (UniqueName: \"kubernetes.io/projected/b61e6e25-934e-4eb5-ba83-7aca994252fc-kube-api-access-pxb26\") on node \"crc\" DevicePath \"\""
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.759980 5021 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:39:28 crc kubenswrapper[5021]: I0121 15:39:28.760009 5021 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b61e6e25-934e-4eb5-ba83-7aca994252fc-util\") on node \"crc\" DevicePath \"\""
Jan 21 15:39:29 crc kubenswrapper[5021]: I0121 15:39:29.314745 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz" event={"ID":"b61e6e25-934e-4eb5-ba83-7aca994252fc","Type":"ContainerDied","Data":"573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1"}
Jan 21 15:39:29 crc kubenswrapper[5021]: I0121 15:39:29.314860 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz"
Jan 21 15:39:29 crc kubenswrapper[5021]: I0121 15:39:29.314797 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="573830666f7e590063c251921b0bb91eeb451ab53170564da59cbe559f6d7bd1"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.394235 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"]
Jan 21 15:39:30 crc kubenswrapper[5021]: E0121 15:39:30.395180 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="pull"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.395503 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="pull"
Jan 21 15:39:30 crc kubenswrapper[5021]: E0121 15:39:30.395557 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="util"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.395574 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="util"
Jan 21 15:39:30 crc kubenswrapper[5021]: E0121 15:39:30.395595 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="extract"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.395611 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="extract"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.397355 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b61e6e25-934e-4eb5-ba83-7aca994252fc" containerName="extract"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.399456 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.408421 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"]
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.485597 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwtwg\" (UniqueName: \"kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.485720 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.485776 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.587579 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwtwg\" (UniqueName: \"kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.587622 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.587656 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.588232 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.588380 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7"
Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.607882 5021 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-rwtwg\" (UniqueName: \"kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg\") pod \"redhat-marketplace-psnh7\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.726678 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:30 crc kubenswrapper[5021]: I0121 15:39:30.954768 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"] Jan 21 15:39:31 crc kubenswrapper[5021]: E0121 15:39:31.194597 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41bb94a6_79f0_48ab_ad3d_16da1122223d.slice/crio-conmon-d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:39:31 crc kubenswrapper[5021]: I0121 15:39:31.333165 5021 generic.go:334] "Generic (PLEG): container finished" podID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerID="d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41" exitCode=0 Jan 21 15:39:31 crc kubenswrapper[5021]: I0121 15:39:31.333219 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerDied","Data":"d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41"} Jan 21 15:39:31 crc kubenswrapper[5021]: I0121 15:39:31.333501 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerStarted","Data":"f30f8c9c2e19d090168eaf2753194775ff02867193023a7781d4def3437c52ae"} Jan 21 15:39:31 crc kubenswrapper[5021]: I0121 15:39:31.923249 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:31 crc kubenswrapper[5021]: I0121 15:39:31.923315 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:32 crc kubenswrapper[5021]: I0121 15:39:32.000039 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:32 crc kubenswrapper[5021]: I0121 15:39:32.393586 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.165149 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm"] Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.167691 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.171845 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-ktsjt" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.176566 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm"] Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.336443 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vg5xl\" (UniqueName: \"kubernetes.io/projected/70b7f82d-fa46-4ef3-b1f8-e790e3e4a540-kube-api-access-vg5xl\") pod \"openstack-operator-controller-init-7f8fb8b79-5jnkm\" (UID: \"70b7f82d-fa46-4ef3-b1f8-e790e3e4a540\") " pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.437541 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vg5xl\" (UniqueName: \"kubernetes.io/projected/70b7f82d-fa46-4ef3-b1f8-e790e3e4a540-kube-api-access-vg5xl\") pod \"openstack-operator-controller-init-7f8fb8b79-5jnkm\" (UID: \"70b7f82d-fa46-4ef3-b1f8-e790e3e4a540\") " pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.456193 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vg5xl\" (UniqueName: \"kubernetes.io/projected/70b7f82d-fa46-4ef3-b1f8-e790e3e4a540-kube-api-access-vg5xl\") pod \"openstack-operator-controller-init-7f8fb8b79-5jnkm\" (UID: \"70b7f82d-fa46-4ef3-b1f8-e790e3e4a540\") " pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.489406 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:33 crc kubenswrapper[5021]: I0121 15:39:33.713751 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm"] Jan 21 15:39:33 crc kubenswrapper[5021]: W0121 15:39:33.724777 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70b7f82d_fa46_4ef3_b1f8_e790e3e4a540.slice/crio-59748950978c68db6f9437642a69ea7453ee427bb8be4ffa156e7501caded097 WatchSource:0}: Error finding container 59748950978c68db6f9437642a69ea7453ee427bb8be4ffa156e7501caded097: Status 404 returned error can't find the container with id 59748950978c68db6f9437642a69ea7453ee427bb8be4ffa156e7501caded097 Jan 21 15:39:34 crc kubenswrapper[5021]: I0121 15:39:34.358045 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerStarted","Data":"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12"} Jan 21 15:39:34 crc kubenswrapper[5021]: I0121 15:39:34.366187 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" event={"ID":"70b7f82d-fa46-4ef3-b1f8-e790e3e4a540","Type":"ContainerStarted","Data":"59748950978c68db6f9437642a69ea7453ee427bb8be4ffa156e7501caded097"} Jan 21 15:39:35 crc kubenswrapper[5021]: I0121 15:39:35.374347 5021 generic.go:334] "Generic (PLEG): container finished" podID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerID="de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12" exitCode=0 Jan 21 15:39:35 crc kubenswrapper[5021]: I0121 15:39:35.374393 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerDied","Data":"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12"} Jan 21 15:39:35 crc kubenswrapper[5021]: I0121 15:39:35.384135 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m25vd"] Jan 21 15:39:35 crc kubenswrapper[5021]: I0121 15:39:35.384400 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m25vd" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="registry-server" containerID="cri-o://3ae63af29e36af76907b6cf16b0e245e3a6b48da075b769771242e010ec3f9da" gracePeriod=2 Jan 21 15:39:36 crc kubenswrapper[5021]: I0121 15:39:36.394299 5021 generic.go:334] "Generic (PLEG): container finished" podID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerID="3ae63af29e36af76907b6cf16b0e245e3a6b48da075b769771242e010ec3f9da" exitCode=0 Jan 21 15:39:36 crc kubenswrapper[5021]: I0121 15:39:36.394459 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerDied","Data":"3ae63af29e36af76907b6cf16b0e245e3a6b48da075b769771242e010ec3f9da"} Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.192172 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.308017 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ns92j\" (UniqueName: \"kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j\") pod \"0dc208ed-5c94-48b7-919c-89ad83b76459\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.308158 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities\") pod \"0dc208ed-5c94-48b7-919c-89ad83b76459\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.308350 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content\") pod \"0dc208ed-5c94-48b7-919c-89ad83b76459\" (UID: \"0dc208ed-5c94-48b7-919c-89ad83b76459\") " Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.308996 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities" (OuterVolumeSpecName: "utilities") pod "0dc208ed-5c94-48b7-919c-89ad83b76459" (UID: "0dc208ed-5c94-48b7-919c-89ad83b76459"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.314420 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j" (OuterVolumeSpecName: "kube-api-access-ns92j") pod "0dc208ed-5c94-48b7-919c-89ad83b76459" (UID: "0dc208ed-5c94-48b7-919c-89ad83b76459"). InnerVolumeSpecName "kube-api-access-ns92j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.371062 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0dc208ed-5c94-48b7-919c-89ad83b76459" (UID: "0dc208ed-5c94-48b7-919c-89ad83b76459"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.403421 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m25vd" event={"ID":"0dc208ed-5c94-48b7-919c-89ad83b76459","Type":"ContainerDied","Data":"b8af4c1373a2154d70a6ac21e0b892f2efcf09f3b464e875558413ddca9c6514"} Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.403477 5021 scope.go:117] "RemoveContainer" containerID="3ae63af29e36af76907b6cf16b0e245e3a6b48da075b769771242e010ec3f9da" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.403565 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m25vd" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.410342 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ns92j\" (UniqueName: \"kubernetes.io/projected/0dc208ed-5c94-48b7-919c-89ad83b76459-kube-api-access-ns92j\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.410393 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.410414 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc208ed-5c94-48b7-919c-89ad83b76459-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.446039 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m25vd"] Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.450527 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m25vd"] Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.751057 5021 scope.go:117] "RemoveContainer" containerID="169d3f8874e0e84266c1de4b56327eec887516af611253fa5515ca7fae458149" Jan 21 15:39:37 crc kubenswrapper[5021]: I0121 15:39:37.864430 5021 scope.go:117] "RemoveContainer" containerID="15cc96cc3ee9192d88ddd1f120c575e9b2eeb5f127e53f22867dc43174477823" Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.413194 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerStarted","Data":"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09"} Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.414807 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" event={"ID":"70b7f82d-fa46-4ef3-b1f8-e790e3e4a540","Type":"ContainerStarted","Data":"1b164b2bff16843ed83fced4869bccb68e80d8ea6b82dc0778ff3f88e2984304"} Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.415203 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.437675 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-psnh7" podStartSLOduration=1.6809325400000001 podStartE2EDuration="8.437653643s" podCreationTimestamp="2026-01-21 15:39:30 +0000 UTC" firstStartedPulling="2026-01-21 15:39:31.33640902 +0000 UTC m=+912.871522949" lastFinishedPulling="2026-01-21 15:39:38.093130163 +0000 UTC m=+919.628244052" observedRunningTime="2026-01-21 15:39:38.431215186 +0000 UTC m=+919.966329095" watchObservedRunningTime="2026-01-21 15:39:38.437653643 +0000 UTC m=+919.972767532" Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.473084 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" podStartSLOduration=1.105808364 podStartE2EDuration="5.473060972s" podCreationTimestamp="2026-01-21 15:39:33 +0000 UTC" firstStartedPulling="2026-01-21 15:39:33.726946303 +0000 UTC m=+915.262060192" lastFinishedPulling="2026-01-21 
15:39:38.094198921 +0000 UTC m=+919.629312800" observedRunningTime="2026-01-21 15:39:38.467949651 +0000 UTC m=+920.003063550" watchObservedRunningTime="2026-01-21 15:39:38.473060972 +0000 UTC m=+920.008174861" Jan 21 15:39:38 crc kubenswrapper[5021]: I0121 15:39:38.747167 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" path="/var/lib/kubelet/pods/0dc208ed-5c94-48b7-919c-89ad83b76459/volumes" Jan 21 15:39:40 crc kubenswrapper[5021]: I0121 15:39:40.727679 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:40 crc kubenswrapper[5021]: I0121 15:39:40.728081 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:40 crc kubenswrapper[5021]: I0121 15:39:40.787504 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:42 crc kubenswrapper[5021]: I0121 15:39:42.357074 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:39:42 crc kubenswrapper[5021]: I0121 15:39:42.357484 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:39:43 crc kubenswrapper[5021]: I0121 15:39:43.494882 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-7f8fb8b79-5jnkm" Jan 21 15:39:50 crc kubenswrapper[5021]: I0121 15:39:50.765174 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:50 crc kubenswrapper[5021]: I0121 15:39:50.806951 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"] Jan 21 15:39:51 crc kubenswrapper[5021]: I0121 15:39:51.493873 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-psnh7" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="registry-server" containerID="cri-o://ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09" gracePeriod=2 Jan 21 15:39:51 crc kubenswrapper[5021]: E0121 15:39:51.525687 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41bb94a6_79f0_48ab_ad3d_16da1122223d.slice/crio-ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.384827 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.500749 5021 generic.go:334] "Generic (PLEG): container finished" podID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerID="ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09" exitCode=0 Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.500961 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerDied","Data":"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09"} Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.501293 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-psnh7" event={"ID":"41bb94a6-79f0-48ab-ad3d-16da1122223d","Type":"ContainerDied","Data":"f30f8c9c2e19d090168eaf2753194775ff02867193023a7781d4def3437c52ae"} Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.501054 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-psnh7" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.501414 5021 scope.go:117] "RemoveContainer" containerID="ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.522873 5021 scope.go:117] "RemoveContainer" containerID="de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.523447 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities\") pod \"41bb94a6-79f0-48ab-ad3d-16da1122223d\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.523549 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwtwg\" (UniqueName: \"kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg\") pod \"41bb94a6-79f0-48ab-ad3d-16da1122223d\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.523661 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content\") pod \"41bb94a6-79f0-48ab-ad3d-16da1122223d\" (UID: \"41bb94a6-79f0-48ab-ad3d-16da1122223d\") " Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.525017 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities" (OuterVolumeSpecName: "utilities") pod "41bb94a6-79f0-48ab-ad3d-16da1122223d" (UID: "41bb94a6-79f0-48ab-ad3d-16da1122223d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.550180 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41bb94a6-79f0-48ab-ad3d-16da1122223d" (UID: "41bb94a6-79f0-48ab-ad3d-16da1122223d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.551147 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg" (OuterVolumeSpecName: "kube-api-access-rwtwg") pod "41bb94a6-79f0-48ab-ad3d-16da1122223d" (UID: "41bb94a6-79f0-48ab-ad3d-16da1122223d"). InnerVolumeSpecName "kube-api-access-rwtwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.565244 5021 scope.go:117] "RemoveContainer" containerID="d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.585630 5021 scope.go:117] "RemoveContainer" containerID="ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09" Jan 21 15:39:52 crc kubenswrapper[5021]: E0121 15:39:52.586320 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09\": container with ID starting with ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09 not found: ID does not exist" containerID="ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.586385 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09"} err="failed to get container status \"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09\": rpc error: code = NotFound desc = could not find container \"ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09\": container with ID starting with ea77a1c24d6fec9ce2b3a1c67046d843623b78b4911554794f9970a9ce305f09 not found: ID does not exist" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.586434 5021 scope.go:117] "RemoveContainer" containerID="de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12" Jan 21 15:39:52 crc kubenswrapper[5021]: E0121 15:39:52.586837 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12\": container with ID starting with de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12 not found: ID does not exist" containerID="de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.586860 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12"} err="failed to get container status \"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12\": rpc error: code = NotFound desc = could not find container \"de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12\": container with ID starting with de32457e34dc695ffc488b7c0ed5e155275b7bf14ea912d4e54142863cac7e12 not found: ID does not exist" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.586873 5021 scope.go:117] "RemoveContainer" containerID="d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41" Jan 21 15:39:52 crc kubenswrapper[5021]: E0121 15:39:52.587318 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41\": container with ID starting with d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41 not found: ID does not exist" containerID="d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.587336 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41"} err="failed to get container status \"d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41\": rpc error: code = NotFound desc = could not find container \"d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41\": container with ID starting with d3e0a21522dc74b4e46a6fe9e3c0142071dd5d5e0aa847d0b4443adfd859bf41 not found: ID does not exist" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.624965 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.625006 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41bb94a6-79f0-48ab-ad3d-16da1122223d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.625019 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwtwg\" (UniqueName: \"kubernetes.io/projected/41bb94a6-79f0-48ab-ad3d-16da1122223d-kube-api-access-rwtwg\") on node \"crc\" DevicePath \"\"" Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.830714 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"] Jan 21 15:39:52 crc kubenswrapper[5021]: I0121 15:39:52.836108 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-psnh7"] Jan 21 15:39:54 crc kubenswrapper[5021]: I0121 15:39:54.747166 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" path="/var/lib/kubelet/pods/41bb94a6-79f0-48ab-ad3d-16da1122223d/volumes" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.247858 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62"] Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248781 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="extract-utilities" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248797 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="extract-utilities" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248809 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248816 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248824 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="extract-utilities" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248830 5021 
state_mem.go:107] "Deleted CPUSet assignment" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="extract-utilities" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248848 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="extract-content" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248854 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="extract-content" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248868 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="extract-content" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248876 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="extract-content" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.248886 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.248892 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.249020 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dc208ed-5c94-48b7-919c-89ad83b76459" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.249028 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="41bb94a6-79f0-48ab-ad3d-16da1122223d" containerName="registry-server" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.249458 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.251603 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.252380 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.252534 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-5rn2j" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.262689 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-gm98s" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.269461 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-tjspf"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.270576 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.273451 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-8s2l8" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.284284 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.289664 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.302723 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-qswjh"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.303746 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.307391 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-tjspf"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.310500 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-4nglf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.334893 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-qswjh"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.344445 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.345489 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.349355 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kkzqc" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.355678 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65dgv\" (UniqueName: \"kubernetes.io/projected/33f53fd4-7cbc-4e1e-a72a-e48eee9ca274-kube-api-access-65dgv\") pod \"cinder-operator-controller-manager-9b68f5989-rkltx\" (UID: \"33f53fd4-7cbc-4e1e-a72a-e48eee9ca274\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.356038 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2lzz\" (UniqueName: \"kubernetes.io/projected/6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0-kube-api-access-l2lzz\") pod \"designate-operator-controller-manager-9f958b845-tjspf\" (UID: \"6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.356162 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w6lp\" (UniqueName: \"kubernetes.io/projected/724463d5-2779-4504-bbd1-4c12353a665c-kube-api-access-2w6lp\") pod \"barbican-operator-controller-manager-7ddb5c749-x4l62\" (UID: \"724463d5-2779-4504-bbd1-4c12353a665c\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.370571 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.377270 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.384249 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-cftxd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.407999 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.458958 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2lzz\" (UniqueName: \"kubernetes.io/projected/6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0-kube-api-access-l2lzz\") pod \"designate-operator-controller-manager-9f958b845-tjspf\" (UID: \"6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.459056 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w6lp\" (UniqueName: \"kubernetes.io/projected/724463d5-2779-4504-bbd1-4c12353a665c-kube-api-access-2w6lp\") pod \"barbican-operator-controller-manager-7ddb5c749-x4l62\" (UID: \"724463d5-2779-4504-bbd1-4c12353a665c\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.459136 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4x75\" (UniqueName: \"kubernetes.io/projected/8e5df137-5a39-434e-9ed8-cd984d3cfecb-kube-api-access-n4x75\") pod \"glance-operator-controller-manager-c6994669c-qswjh\" (UID: \"8e5df137-5a39-434e-9ed8-cd984d3cfecb\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.459170 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxw79\" (UniqueName: \"kubernetes.io/projected/fcd8d00d-0a93-400d-8c23-eb51dbf56a35-kube-api-access-dxw79\") pod \"horizon-operator-controller-manager-77d5c5b54f-s6lsx\" (UID: \"fcd8d00d-0a93-400d-8c23-eb51dbf56a35\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.459233 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65dgv\" (UniqueName: \"kubernetes.io/projected/33f53fd4-7cbc-4e1e-a72a-e48eee9ca274-kube-api-access-65dgv\") pod \"cinder-operator-controller-manager-9b68f5989-rkltx\" (UID: \"33f53fd4-7cbc-4e1e-a72a-e48eee9ca274\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.459281 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfxqh\" (UniqueName: \"kubernetes.io/projected/7645cfbe-28a4-4098-af64-3be341c2306f-kube-api-access-sfxqh\") pod \"heat-operator-controller-manager-594c8c9d5d-zl9lj\" (UID: \"7645cfbe-28a4-4098-af64-3be341c2306f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.472817 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx"] Jan 21 15:40:02 crc kubenswrapper[5021]: 
I0121 15:40:02.507135 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w6lp\" (UniqueName: \"kubernetes.io/projected/724463d5-2779-4504-bbd1-4c12353a665c-kube-api-access-2w6lp\") pod \"barbican-operator-controller-manager-7ddb5c749-x4l62\" (UID: \"724463d5-2779-4504-bbd1-4c12353a665c\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.515262 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65dgv\" (UniqueName: \"kubernetes.io/projected/33f53fd4-7cbc-4e1e-a72a-e48eee9ca274-kube-api-access-65dgv\") pod \"cinder-operator-controller-manager-9b68f5989-rkltx\" (UID: \"33f53fd4-7cbc-4e1e-a72a-e48eee9ca274\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.524153 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.525533 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.534700 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-sv5l9" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.535268 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.550837 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2lzz\" (UniqueName: \"kubernetes.io/projected/6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0-kube-api-access-l2lzz\") pod \"designate-operator-controller-manager-9f958b845-tjspf\" (UID: \"6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.562447 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfxqh\" (UniqueName: \"kubernetes.io/projected/7645cfbe-28a4-4098-af64-3be341c2306f-kube-api-access-sfxqh\") pod \"heat-operator-controller-manager-594c8c9d5d-zl9lj\" (UID: \"7645cfbe-28a4-4098-af64-3be341c2306f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.562557 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4x75\" (UniqueName: \"kubernetes.io/projected/8e5df137-5a39-434e-9ed8-cd984d3cfecb-kube-api-access-n4x75\") pod \"glance-operator-controller-manager-c6994669c-qswjh\" (UID: \"8e5df137-5a39-434e-9ed8-cd984d3cfecb\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.562594 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxw79\" (UniqueName: \"kubernetes.io/projected/fcd8d00d-0a93-400d-8c23-eb51dbf56a35-kube-api-access-dxw79\") pod \"horizon-operator-controller-manager-77d5c5b54f-s6lsx\" (UID: \"fcd8d00d-0a93-400d-8c23-eb51dbf56a35\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.563319 
5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.564430 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.572314 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-9ckwl" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.592038 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.593668 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.594077 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfxqh\" (UniqueName: \"kubernetes.io/projected/7645cfbe-28a4-4098-af64-3be341c2306f-kube-api-access-sfxqh\") pod \"heat-operator-controller-manager-594c8c9d5d-zl9lj\" (UID: \"7645cfbe-28a4-4098-af64-3be341c2306f\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.601820 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4x75\" (UniqueName: \"kubernetes.io/projected/8e5df137-5a39-434e-9ed8-cd984d3cfecb-kube-api-access-n4x75\") pod \"glance-operator-controller-manager-c6994669c-qswjh\" (UID: \"8e5df137-5a39-434e-9ed8-cd984d3cfecb\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.603703 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.610389 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxw79\" (UniqueName: \"kubernetes.io/projected/fcd8d00d-0a93-400d-8c23-eb51dbf56a35-kube-api-access-dxw79\") pod \"horizon-operator-controller-manager-77d5c5b54f-s6lsx\" (UID: \"fcd8d00d-0a93-400d-8c23-eb51dbf56a35\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.616901 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.633171 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.641884 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.655005 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.656699 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.665453 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.665534 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kdp5\" (UniqueName: \"kubernetes.io/projected/696e3c0f-78c0-4517-8def-49fbe8728f48-kube-api-access-5kdp5\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.665698 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tztk\" (UniqueName: \"kubernetes.io/projected/9fdec40b-ea8d-4d5e-82ac-27b0a76f450b-kube-api-access-5tztk\") pod \"ironic-operator-controller-manager-78757b4889-2bt46\" (UID: \"9fdec40b-ea8d-4d5e-82ac-27b0a76f450b\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.670587 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-h8c4m" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.674215 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.686434 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.706380 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.708241 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.709196 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.731896 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-86cnd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.766064 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.770739 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kdp5\" (UniqueName: \"kubernetes.io/projected/696e3c0f-78c0-4517-8def-49fbe8728f48-kube-api-access-5kdp5\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.770828 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljhtb\" (UniqueName: \"kubernetes.io/projected/f9c1fef7-2823-4ebc-866a-adea991f6b5c-kube-api-access-ljhtb\") pod \"manila-operator-controller-manager-864f6b75bf-9bzn7\" (UID: \"f9c1fef7-2823-4ebc-866a-adea991f6b5c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.770930 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tztk\" (UniqueName: \"kubernetes.io/projected/9fdec40b-ea8d-4d5e-82ac-27b0a76f450b-kube-api-access-5tztk\") pod \"ironic-operator-controller-manager-78757b4889-2bt46\" (UID: \"9fdec40b-ea8d-4d5e-82ac-27b0a76f450b\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.770982 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.771010 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn6hx\" (UniqueName: \"kubernetes.io/projected/c20dfe07-4e4b-44e0-a260-ff4958985c0c-kube-api-access-sn6hx\") pod \"keystone-operator-controller-manager-767fdc4f47-wj9sz\" (UID: \"c20dfe07-4e4b-44e0-a260-ff4958985c0c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.771734 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:02 crc kubenswrapper[5021]: E0121 15:40:02.771793 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:03.271775294 +0000 UTC m=+944.806889183 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.796996 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.798065 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.815945 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.816857 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.817678 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-ltscz" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.818745 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kdp5\" (UniqueName: \"kubernetes.io/projected/696e3c0f-78c0-4517-8def-49fbe8728f48-kube-api-access-5kdp5\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.837497 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tztk\" (UniqueName: \"kubernetes.io/projected/9fdec40b-ea8d-4d5e-82ac-27b0a76f450b-kube-api-access-5tztk\") pod \"ironic-operator-controller-manager-78757b4889-2bt46\" (UID: \"9fdec40b-ea8d-4d5e-82ac-27b0a76f450b\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.840097 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.855201 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-4t7l8" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.855692 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.863979 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.865307 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.876474 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn6hx\" (UniqueName: \"kubernetes.io/projected/c20dfe07-4e4b-44e0-a260-ff4958985c0c-kube-api-access-sn6hx\") pod \"keystone-operator-controller-manager-767fdc4f47-wj9sz\" (UID: \"c20dfe07-4e4b-44e0-a260-ff4958985c0c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.876554 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kctg\" (UniqueName: \"kubernetes.io/projected/4180df4a-4632-4c29-b5cf-a597b93d4541-kube-api-access-4kctg\") pod \"mariadb-operator-controller-manager-c87fff755-tr25w\" (UID: \"4180df4a-4632-4c29-b5cf-a597b93d4541\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.876606 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljhtb\" (UniqueName: \"kubernetes.io/projected/f9c1fef7-2823-4ebc-866a-adea991f6b5c-kube-api-access-ljhtb\") pod \"manila-operator-controller-manager-864f6b75bf-9bzn7\" (UID: \"f9c1fef7-2823-4ebc-866a-adea991f6b5c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.878004 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-fqgkf" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.926034 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.938324 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.939690 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn6hx\" (UniqueName: \"kubernetes.io/projected/c20dfe07-4e4b-44e0-a260-ff4958985c0c-kube-api-access-sn6hx\") pod \"keystone-operator-controller-manager-767fdc4f47-wj9sz\" (UID: \"c20dfe07-4e4b-44e0-a260-ff4958985c0c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.946657 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.950095 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.957610 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-tn8fp" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.972823 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.979865 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljhtb\" (UniqueName: \"kubernetes.io/projected/f9c1fef7-2823-4ebc-866a-adea991f6b5c-kube-api-access-ljhtb\") pod \"manila-operator-controller-manager-864f6b75bf-9bzn7\" (UID: \"f9c1fef7-2823-4ebc-866a-adea991f6b5c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.985848 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kctg\" (UniqueName: \"kubernetes.io/projected/4180df4a-4632-4c29-b5cf-a597b93d4541-kube-api-access-4kctg\") pod \"mariadb-operator-controller-manager-c87fff755-tr25w\" (UID: \"4180df4a-4632-4c29-b5cf-a597b93d4541\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.985923 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk7fz\" (UniqueName: \"kubernetes.io/projected/dd66a6c4-dee4-4079-ac1c-d838cc27f752-kube-api-access-hk7fz\") pod \"neutron-operator-controller-manager-cb4666565-jk57l\" (UID: \"dd66a6c4-dee4-4079-ac1c-d838cc27f752\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.985967 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvpms\" (UniqueName: \"kubernetes.io/projected/569260c0-7874-41fa-9114-66643a79cdfe-kube-api-access-tvpms\") pod \"nova-operator-controller-manager-65849867d6-xm9sm\" (UID: \"569260c0-7874-41fa-9114-66643a79cdfe\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.998252 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"] Jan 21 15:40:02 crc kubenswrapper[5021]: I0121 15:40:02.999223 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.008227 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.019389 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.023339 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.026746 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-2n6cl" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.026924 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.033313 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-pz9mh" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.036985 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.052007 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.066020 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.068023 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.069310 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.079014 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-46n4r" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.083987 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090256 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2glvr\" (UniqueName: \"kubernetes.io/projected/0ca88d8a-abb8-498b-9588-376e3cc3a49e-kube-api-access-2glvr\") pod \"ovn-operator-controller-manager-55db956ddc-kmmh6\" (UID: \"0ca88d8a-abb8-498b-9588-376e3cc3a49e\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090303 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090326 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk7fz\" (UniqueName: \"kubernetes.io/projected/dd66a6c4-dee4-4079-ac1c-d838cc27f752-kube-api-access-hk7fz\") pod \"neutron-operator-controller-manager-cb4666565-jk57l\" (UID: \"dd66a6c4-dee4-4079-ac1c-d838cc27f752\") " 
pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090359 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvpms\" (UniqueName: \"kubernetes.io/projected/569260c0-7874-41fa-9114-66643a79cdfe-kube-api-access-tvpms\") pod \"nova-operator-controller-manager-65849867d6-xm9sm\" (UID: \"569260c0-7874-41fa-9114-66643a79cdfe\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090380 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhp6q\" (UniqueName: \"kubernetes.io/projected/25709945-8415-492c-a829-fd79f3fbe521-kube-api-access-nhp6q\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.090417 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfrhr\" (UniqueName: \"kubernetes.io/projected/48921cb7-8983-4b8e-87cd-3316190ede3e-kube-api-access-lfrhr\") pod \"octavia-operator-controller-manager-7fc9b76cf6-jgqdp\" (UID: \"48921cb7-8983-4b8e-87cd-3316190ede3e\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.111985 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.113104 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.135126 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-bpfjw" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.135473 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kctg\" (UniqueName: \"kubernetes.io/projected/4180df4a-4632-4c29-b5cf-a597b93d4541-kube-api-access-4kctg\") pod \"mariadb-operator-controller-manager-c87fff755-tr25w\" (UID: \"4180df4a-4632-4c29-b5cf-a597b93d4541\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.136204 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvpms\" (UniqueName: \"kubernetes.io/projected/569260c0-7874-41fa-9114-66643a79cdfe-kube-api-access-tvpms\") pod \"nova-operator-controller-manager-65849867d6-xm9sm\" (UID: \"569260c0-7874-41fa-9114-66643a79cdfe\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.145402 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.152840 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.158724 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk7fz\" (UniqueName: \"kubernetes.io/projected/dd66a6c4-dee4-4079-ac1c-d838cc27f752-kube-api-access-hk7fz\") pod \"neutron-operator-controller-manager-cb4666565-jk57l\" (UID: \"dd66a6c4-dee4-4079-ac1c-d838cc27f752\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193416 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2glvr\" (UniqueName: \"kubernetes.io/projected/0ca88d8a-abb8-498b-9588-376e3cc3a49e-kube-api-access-2glvr\") pod \"ovn-operator-controller-manager-55db956ddc-kmmh6\" (UID: \"0ca88d8a-abb8-498b-9588-376e3cc3a49e\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193488 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193555 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tczkp\" (UniqueName: \"kubernetes.io/projected/00e6bc09-d424-4ed1-b62a-b6fadc7416ec-kube-api-access-tczkp\") pod \"placement-operator-controller-manager-686df47fcb-tcnr8\" (UID: \"00e6bc09-d424-4ed1-b62a-b6fadc7416ec\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193618 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhp6q\" (UniqueName: \"kubernetes.io/projected/25709945-8415-492c-a829-fd79f3fbe521-kube-api-access-nhp6q\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193671 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfrhr\" (UniqueName: \"kubernetes.io/projected/48921cb7-8983-4b8e-87cd-3316190ede3e-kube-api-access-lfrhr\") pod \"octavia-operator-controller-manager-7fc9b76cf6-jgqdp\" (UID: \"48921cb7-8983-4b8e-87cd-3316190ede3e\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.193808 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzfqk\" (UniqueName: \"kubernetes.io/projected/46f232dd-a469-4c74-b456-ba1b8f80b32a-kube-api-access-vzfqk\") pod \"telemetry-operator-controller-manager-5f8f495fcf-96kl8\" (UID: \"46f232dd-a469-4c74-b456-ba1b8f80b32a\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.194559 5021 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.194629 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert podName:25709945-8415-492c-a829-fd79f3fbe521 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:03.694609788 +0000 UTC m=+945.229723677 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" (UID: "25709945-8415-492c-a829-fd79f3fbe521") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.198305 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.223737 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.259923 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhp6q\" (UniqueName: \"kubernetes.io/projected/25709945-8415-492c-a829-fd79f3fbe521-kube-api-access-nhp6q\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.302013 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.302227 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.302324 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:04.302291536 +0000 UTC m=+945.837405425 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.302469 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzfqk\" (UniqueName: \"kubernetes.io/projected/46f232dd-a469-4c74-b456-ba1b8f80b32a-kube-api-access-vzfqk\") pod \"telemetry-operator-controller-manager-5f8f495fcf-96kl8\" (UID: \"46f232dd-a469-4c74-b456-ba1b8f80b32a\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.303090 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tczkp\" (UniqueName: \"kubernetes.io/projected/00e6bc09-d424-4ed1-b62a-b6fadc7416ec-kube-api-access-tczkp\") pod \"placement-operator-controller-manager-686df47fcb-tcnr8\" (UID: \"00e6bc09-d424-4ed1-b62a-b6fadc7416ec\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.394337 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzfqk\" (UniqueName: \"kubernetes.io/projected/46f232dd-a469-4c74-b456-ba1b8f80b32a-kube-api-access-vzfqk\") pod \"telemetry-operator-controller-manager-5f8f495fcf-96kl8\" (UID: \"46f232dd-a469-4c74-b456-ba1b8f80b32a\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.394666 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2glvr\" (UniqueName: \"kubernetes.io/projected/0ca88d8a-abb8-498b-9588-376e3cc3a49e-kube-api-access-2glvr\") pod \"ovn-operator-controller-manager-55db956ddc-kmmh6\" (UID: \"0ca88d8a-abb8-498b-9588-376e3cc3a49e\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.398556 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.399502 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.412240 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tczkp\" (UniqueName: \"kubernetes.io/projected/00e6bc09-d424-4ed1-b62a-b6fadc7416ec-kube-api-access-tczkp\") pod \"placement-operator-controller-manager-686df47fcb-tcnr8\" (UID: \"00e6bc09-d424-4ed1-b62a-b6fadc7416ec\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.418004 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-c4wrz" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.425272 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.426466 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.435584 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-svkrk" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.443638 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfrhr\" (UniqueName: \"kubernetes.io/projected/48921cb7-8983-4b8e-87cd-3316190ede3e-kube-api-access-lfrhr\") pod \"octavia-operator-controller-manager-7fc9b76cf6-jgqdp\" (UID: \"48921cb7-8983-4b8e-87cd-3316190ede3e\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.449879 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.465339 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.483333 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.484813 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.489437 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.494891 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-lznxn" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.515185 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mvf2\" (UniqueName: \"kubernetes.io/projected/25190772-2e7e-4e99-9df6-727b970a7930-kube-api-access-2mvf2\") pod \"test-operator-controller-manager-7cd8bc9dbb-r274t\" (UID: \"25190772-2e7e-4e99-9df6-727b970a7930\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.515278 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2rcf\" (UniqueName: \"kubernetes.io/projected/794724a4-8705-4860-a126-6baefc733a24-kube-api-access-t2rcf\") pod \"watcher-operator-controller-manager-64cd966744-p4nrl\" (UID: \"794724a4-8705-4860-a126-6baefc733a24\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.525371 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.528114 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.529275 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.536494 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.537159 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.538509 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ntddp" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.553272 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.595042 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.596525 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.599343 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-v2hl5" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.606206 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616410 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616470 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qlng\" (UniqueName: \"kubernetes.io/projected/727fe8cb-51ad-433f-90e1-5998b948799a-kube-api-access-2qlng\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616500 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2rcf\" (UniqueName: \"kubernetes.io/projected/794724a4-8705-4860-a126-6baefc733a24-kube-api-access-t2rcf\") pod \"watcher-operator-controller-manager-64cd966744-p4nrl\" (UID: \"794724a4-8705-4860-a126-6baefc733a24\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616564 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " 
pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616609 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mvf2\" (UniqueName: \"kubernetes.io/projected/25190772-2e7e-4e99-9df6-727b970a7930-kube-api-access-2mvf2\") pod \"test-operator-controller-manager-7cd8bc9dbb-r274t\" (UID: \"25190772-2e7e-4e99-9df6-727b970a7930\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.616630 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcssr\" (UniqueName: \"kubernetes.io/projected/e0c21887-edf1-4362-b212-456e024d2cd9-kube-api-access-wcssr\") pod \"swift-operator-controller-manager-85dd56d4cc-msh7h\" (UID: \"e0c21887-edf1-4362-b212-456e024d2cd9\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.623603 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw"] Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.645852 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mvf2\" (UniqueName: \"kubernetes.io/projected/25190772-2e7e-4e99-9df6-727b970a7930-kube-api-access-2mvf2\") pod \"test-operator-controller-manager-7cd8bc9dbb-r274t\" (UID: \"25190772-2e7e-4e99-9df6-727b970a7930\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.650170 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2rcf\" (UniqueName: \"kubernetes.io/projected/794724a4-8705-4860-a126-6baefc733a24-kube-api-access-t2rcf\") pod \"watcher-operator-controller-manager-64cd966744-p4nrl\" (UID: \"794724a4-8705-4860-a126-6baefc733a24\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.721444 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pm7z\" (UniqueName: \"kubernetes.io/projected/37d25098-ad0c-459e-b6e7-6b11d269606b-kube-api-access-2pm7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-x6zvw\" (UID: \"37d25098-ad0c-459e-b6e7-6b11d269606b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.723174 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.723390 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.723699 5021 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcssr\" (UniqueName: \"kubernetes.io/projected/e0c21887-edf1-4362-b212-456e024d2cd9-kube-api-access-wcssr\") pod \"swift-operator-controller-manager-85dd56d4cc-msh7h\" (UID: \"e0c21887-edf1-4362-b212-456e024d2cd9\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.723484 5021 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.723923 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:04.223881905 +0000 UTC m=+945.758995794 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.723579 5021 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.725065 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert podName:25709945-8415-492c-a829-fd79f3fbe521 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:04.725049837 +0000 UTC m=+946.260163726 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" (UID: "25709945-8415-492c-a829-fd79f3fbe521") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.725159 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.725199 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qlng\" (UniqueName: \"kubernetes.io/projected/727fe8cb-51ad-433f-90e1-5998b948799a-kube-api-access-2qlng\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.725729 5021 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: E0121 15:40:03.725779 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. 
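
The secrets missing here (webhook-server-cert, metrics-server-cert, the operator webhook certs) are ordinary kubernetes.io/tls secrets that cert-manager or the operator bundle is expected to publish; as soon as a secret with the expected name exists, the next retry mounts it. A sketch of the shape kubelet is waiting for, with hypothetical PEM bytes and an in-memory fake client just to exercise it; in the real deployment these are issued automatically, not created by hand:

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/fake"
    )

    // createWebhookCert shows the secret shape the mounts above wait for.
    // crt and key are hypothetical PEM bytes.
    func createWebhookCert(cs kubernetes.Interface, crt, key []byte) error {
        sec := &corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "webhook-server-cert", // name from the error text above
                Namespace: "openstack-operators",
            },
            Type: corev1.SecretTypeTLS, // requires the tls.crt and tls.key keys
            Data: map[string][]byte{
                corev1.TLSCertKey:       crt, // "tls.crt"
                corev1.TLSPrivateKeyKey: key, // "tls.key"
            },
        }
        _, err := cs.CoreV1().Secrets(sec.Namespace).Create(context.Background(), sec, metav1.CreateOptions{})
        return err
    }

    func main() {
        cs := fake.NewSimpleClientset() // in-memory client, just to run the sketch
        if err := createWebhookCert(cs, []byte("PEM CERT"), []byte("PEM KEY")); err != nil {
            panic(err)
        }
        fmt.Println("created webhook-server-cert")
    }
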
No retries permitted until 2026-01-21 15:40:04.225764127 +0000 UTC m=+945.760878016 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "metrics-server-cert" not found Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.749776 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcssr\" (UniqueName: \"kubernetes.io/projected/e0c21887-edf1-4362-b212-456e024d2cd9-kube-api-access-wcssr\") pod \"swift-operator-controller-manager-85dd56d4cc-msh7h\" (UID: \"e0c21887-edf1-4362-b212-456e024d2cd9\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.760671 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qlng\" (UniqueName: \"kubernetes.io/projected/727fe8cb-51ad-433f-90e1-5998b948799a-kube-api-access-2qlng\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.827324 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pm7z\" (UniqueName: \"kubernetes.io/projected/37d25098-ad0c-459e-b6e7-6b11d269606b-kube-api-access-2pm7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-x6zvw\" (UID: \"37d25098-ad0c-459e-b6e7-6b11d269606b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.860475 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pm7z\" (UniqueName: \"kubernetes.io/projected/37d25098-ad0c-459e-b6e7-6b11d269606b-kube-api-access-2pm7z\") pod \"rabbitmq-cluster-operator-manager-668c99d594-x6zvw\" (UID: \"37d25098-ad0c-459e-b6e7-6b11d269606b\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.879082 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.880966 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.966375 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" Jan 21 15:40:03 crc kubenswrapper[5021]: I0121 15:40:03.993282 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.005930 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.050027 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.238612 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.238725 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.238878 5021 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.238943 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:05.238929043 +0000 UTC m=+946.774042932 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.238992 5021 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.239016 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:05.239009235 +0000 UTC m=+946.774123124 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "metrics-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.340127 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.340956 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.341026 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:06.341001937 +0000 UTC m=+947.876115836 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.415786 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.422379 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-tjspf"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.430981 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.451129 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj"] Jan 21 15:40:04 crc kubenswrapper[5021]: W0121 15:40:04.452504 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod724463d5_2779_4504_bbd1_4c12353a665c.slice/crio-2fa97399d9208ae2da6b64294240926068d1ced9ca95e20c413a50cbbb90d028 WatchSource:0}: Error finding container 2fa97399d9208ae2da6b64294240926068d1ced9ca95e20c413a50cbbb90d028: Status 404 returned error can't find the container with id 2fa97399d9208ae2da6b64294240926068d1ced9ca95e20c413a50cbbb90d028 Jan 21 15:40:04 crc kubenswrapper[5021]: W0121 15:40:04.457307 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d2f5c7c_f0d7_405b_b9cf_427ea840f7c0.slice/crio-947b926c8977eee86279e6a4542bb0abb36e65c5a28574cc0e0ea01b8d5dc19f WatchSource:0}: Error finding container 947b926c8977eee86279e6a4542bb0abb36e65c5a28574cc0e0ea01b8d5dc19f: Status 404 returned error can't find the container with id 947b926c8977eee86279e6a4542bb0abb36e65c5a28574cc0e0ea01b8d5dc19f Jan 21 15:40:04 crc kubenswrapper[5021]: W0121 15:40:04.458949 5021 manager.go:1169] Failed to process 
watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7645cfbe_28a4_4098_af64_3be341c2306f.slice/crio-48f27177317575daae34378bd8b349913df8bd6d164a82ee41b8c704f1d737a8 WatchSource:0}: Error finding container 48f27177317575daae34378bd8b349913df8bd6d164a82ee41b8c704f1d737a8: Status 404 returned error can't find the container with id 48f27177317575daae34378bd8b349913df8bd6d164a82ee41b8c704f1d737a8 Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.603752 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" event={"ID":"33f53fd4-7cbc-4e1e-a72a-e48eee9ca274","Type":"ContainerStarted","Data":"0c49c0f477d9f83a482842b039cef566a7e79fef6dd3368e25fe59d11c618640"} Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.605434 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" event={"ID":"7645cfbe-28a4-4098-af64-3be341c2306f","Type":"ContainerStarted","Data":"48f27177317575daae34378bd8b349913df8bd6d164a82ee41b8c704f1d737a8"} Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.617320 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" event={"ID":"724463d5-2779-4504-bbd1-4c12353a665c","Type":"ContainerStarted","Data":"2fa97399d9208ae2da6b64294240926068d1ced9ca95e20c413a50cbbb90d028"} Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.629332 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" event={"ID":"6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0","Type":"ContainerStarted","Data":"947b926c8977eee86279e6a4542bb0abb36e65c5a28574cc0e0ea01b8d5dc19f"} Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.748218 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.751116 5021 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: E0121 15:40:04.751179 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert podName:25709945-8415-492c-a829-fd79f3fbe521 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:06.751161604 +0000 UTC m=+948.286275493 (durationBeforeRetry 2s). 
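
Across these retries the durationBeforeRetry for a given volume doubles: 500ms, then 1s, then 2s for the same cert volumes. A minimal sketch of that doubling pattern; the starting value matches the log, while the ceiling below is an assumption for illustration, since the log alone does not show where kubelet caps the backoff:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond // first durationBeforeRetry in the log
        maxDelay := 2 * time.Minute     // assumed ceiling, not kubelet's exact cap
        for attempt := 1; attempt <= 5; attempt++ {
            fmt.Printf("attempt %d: wait %v before retrying the mount\n", attempt, delay)
            delay *= 2 // 500ms -> 1s -> 2s, matching the retries above
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }
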
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" (UID: "25709945-8415-492c-a829-fd79f3fbe521") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.834471 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.834559 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.834591 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.840038 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.870474 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-qswjh"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.886436 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46"] Jan 21 15:40:04 crc kubenswrapper[5021]: I0121 15:40:04.900425 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx"] Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.133284 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8"] Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.142795 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l"] Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.187034 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t"] Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.200507 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp"] Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.211502 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lfrhr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7fc9b76cf6-jgqdp_openstack-operators(48921cb7-8983-4b8e-87cd-3316190ede3e): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.213102 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" podUID="48921cb7-8983-4b8e-87cd-3316190ede3e"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.217276 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6"]
Jan 21 15:40:05 crc kubenswrapper[5021]: W0121 15:40:05.223158 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25190772_2e7e_4e99_9df6_727b970a7930.slice/crio-a064f68e422918824940e214b1cf3558783fc9c7318e9a04a4a59783ee50a9f4 WatchSource:0}: Error finding container a064f68e422918824940e214b1cf3558783fc9c7318e9a04a4a59783ee50a9f4: Status 404 returned error can't find the container with id a064f68e422918824940e214b1cf3558783fc9c7318e9a04a4a59783ee50a9f4
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.223598 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw"]
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.231511 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2mvf2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-7cd8bc9dbb-r274t_openstack-operators(25190772-2e7e-4e99-9df6-727b970a7930): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.232525 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm"]
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.232677 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" podUID="25190772-2e7e-4e99-9df6-727b970a7930"
Jan 21 15:40:05 crc kubenswrapper[5021]: W0121 15:40:05.232853 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod794724a4_8705_4860_a126_6baefc733a24.slice/crio-53b60b62495764f5dd00a40735cfe5fd9daa104f4f2f347d672c41313fd32302 WatchSource:0}: Error finding container 53b60b62495764f5dd00a40735cfe5fd9daa104f4f2f347d672c41313fd32302: Status 404 returned error can't find the container with id 53b60b62495764f5dd00a40735cfe5fd9daa104f4f2f347d672c41313fd32302
Jan 21 15:40:05 crc kubenswrapper[5021]: W0121 15:40:05.250724 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37d25098_ad0c_459e_b6e7_6b11d269606b.slice/crio-57e869ecec81a70fe1184915b00c445a28157ba820a4ba5609fa0b866e7bc5ed WatchSource:0}: Error finding container 57e869ecec81a70fe1184915b00c445a28157ba820a4ba5609fa0b866e7bc5ed: Status 404 returned error can't find the container with id 57e869ecec81a70fe1184915b00c445a28157ba820a4ba5609fa0b866e7bc5ed
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.252680 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tvpms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-65849867d6-xm9sm_openstack-operators(569260c0-7874-41fa-9114-66643a79cdfe): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.252933 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2glvr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-kmmh6_openstack-operators(0ca88d8a-abb8-498b-9588-376e3cc3a49e): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.253058 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wcssr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-85dd56d4cc-msh7h_openstack-operators(e0c21887-edf1-4362-b212-456e024d2cd9): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.254021 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" podUID="569260c0-7874-41fa-9114-66643a79cdfe"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.254096 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" podUID="0ca88d8a-abb8-498b-9588-376e3cc3a49e"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.254158 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" podUID="e0c21887-edf1-4362-b212-456e024d2cd9"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.256777 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2pm7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-x6zvw_openstack-operators(37d25098-ad0c-459e-b6e7-6b11d269606b): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.257834 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl"]
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.257886 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" podUID="37d25098-ad0c-459e-b6e7-6b11d269606b"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.272196 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h"]
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.275866 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.277368 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.277512 5021 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.277611 5021 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.277688 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:07.277669025 +0000 UTC m=+948.812782914 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "webhook-server-cert" not found
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.277770 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:07.277747617 +0000 UTC m=+948.812861506 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "metrics-server-cert" not found
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.644760 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" event={"ID":"569260c0-7874-41fa-9114-66643a79cdfe","Type":"ContainerStarted","Data":"5109b37b32c277118abf8153b51fa773b7bce5258e805dee514c46622bde1775"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.654408 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231\\\"\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" podUID="569260c0-7874-41fa-9114-66643a79cdfe"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.659392 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" event={"ID":"48921cb7-8983-4b8e-87cd-3316190ede3e","Type":"ContainerStarted","Data":"bccb8960fd508bd51c22db9f4e8734f9f384b3e9519a5a62926cbbf653e5e907"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.661099 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" podUID="48921cb7-8983-4b8e-87cd-3316190ede3e"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.663891 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" event={"ID":"37d25098-ad0c-459e-b6e7-6b11d269606b","Type":"ContainerStarted","Data":"57e869ecec81a70fe1184915b00c445a28157ba820a4ba5609fa0b866e7bc5ed"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.665614 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" podUID="37d25098-ad0c-459e-b6e7-6b11d269606b"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.668112 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" event={"ID":"0ca88d8a-abb8-498b-9588-376e3cc3a49e","Type":"ContainerStarted","Data":"81402a55f447260b7aba79918d2aee07217cc5c9dcc644557192ac4e10ac9e4c"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.669849 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" podUID="0ca88d8a-abb8-498b-9588-376e3cc3a49e"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.670184 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" event={"ID":"46f232dd-a469-4c74-b456-ba1b8f80b32a","Type":"ContainerStarted","Data":"5c7e27b43d9115952818a21f7c8f79024de84131b58abbb146f66d380e1e943b"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.685100 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" event={"ID":"4180df4a-4632-4c29-b5cf-a597b93d4541","Type":"ContainerStarted","Data":"f5e371adf41d101d48ed0deb3dc737c219d6ce1d81445ffa6bccb0e605c4af0c"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.687014 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" event={"ID":"00e6bc09-d424-4ed1-b62a-b6fadc7416ec","Type":"ContainerStarted","Data":"c21085a24423d20828824632e1d252010532c34d62ec3ca06175dd2c421ae568"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.689442 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" event={"ID":"25190772-2e7e-4e99-9df6-727b970a7930","Type":"ContainerStarted","Data":"a064f68e422918824940e214b1cf3558783fc9c7318e9a04a4a59783ee50a9f4"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.698113 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" podUID="25190772-2e7e-4e99-9df6-727b970a7930"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.699720 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" event={"ID":"f9c1fef7-2823-4ebc-866a-adea991f6b5c","Type":"ContainerStarted","Data":"fb0b7c55ff3cc5c54491743f824dac2671d2acdbb9eaaabeedb7d575f37d6a08"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.719697 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" event={"ID":"e0c21887-edf1-4362-b212-456e024d2cd9","Type":"ContainerStarted","Data":"f8bed840b60eba493e982bcefbf8221274e086b1ee20b743ea74bd7c6524ce8f"}
Jan 21 15:40:05 crc kubenswrapper[5021]: E0121 15:40:05.721377 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" podUID="e0c21887-edf1-4362-b212-456e024d2cd9"
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.770178 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" event={"ID":"dd66a6c4-dee4-4079-ac1c-d838cc27f752","Type":"ContainerStarted","Data":"8f89d578e2fc38dcc539d2c5f9d6fa3193414e9749eeb437e17761161e78a7b5"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.827237 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" event={"ID":"8e5df137-5a39-434e-9ed8-cd984d3cfecb","Type":"ContainerStarted","Data":"7ae921fc2f875f75c736ba637847c0bf81bc543cff2251505bd43830e64a8936"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.879352 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" event={"ID":"fcd8d00d-0a93-400d-8c23-eb51dbf56a35","Type":"ContainerStarted","Data":"dc934cd6e5e5f32c7c89faab57eab8a7fe84816a580530f37c0c0700a6457c29"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.893283 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" event={"ID":"794724a4-8705-4860-a126-6baefc733a24","Type":"ContainerStarted","Data":"53b60b62495764f5dd00a40735cfe5fd9daa104f4f2f347d672c41313fd32302"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.908873 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" event={"ID":"c20dfe07-4e4b-44e0-a260-ff4958985c0c","Type":"ContainerStarted","Data":"926ebb53efcf3145962d6a86bc68797ff9a55d1c2d3928c407b536a0a2bb70c3"}
Jan 21 15:40:05 crc kubenswrapper[5021]: I0121 15:40:05.931648 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" event={"ID":"9fdec40b-ea8d-4d5e-82ac-27b0a76f450b","Type":"ContainerStarted","Data":"de020082a4b0498876fa32dac656dcae6155557106b40662b176254980255aef"}
Jan 21 15:40:06 crc kubenswrapper[5021]: I0121 15:40:06.399141 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:06 crc kubenswrapper[5021]: E0121 15:40:06.399421 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:06 crc kubenswrapper[5021]: E0121 15:40:06.399526 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:10.399504101 +0000 UTC m=+951.934617990 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:06 crc kubenswrapper[5021]: I0121 15:40:06.806957 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:40:06 crc kubenswrapper[5021]: E0121 15:40:06.807146 5021 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 21 15:40:06 crc kubenswrapper[5021]: E0121 15:40:06.807225 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert podName:25709945-8415-492c-a829-fd79f3fbe521 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:10.807206611 +0000 UTC m=+952.342320500 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" (UID: "25709945-8415-492c-a829-fd79f3fbe521") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:06.998589 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" podUID="0ca88d8a-abb8-498b-9588-376e3cc3a49e"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:06.999007 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" podUID="48921cb7-8983-4b8e-87cd-3316190ede3e"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.004744 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231\\\"\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" podUID="569260c0-7874-41fa-9114-66643a79cdfe"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.004849 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" podUID="e0c21887-edf1-4362-b212-456e024d2cd9"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.004900 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" podUID="25190772-2e7e-4e99-9df6-727b970a7930"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.004966 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" podUID="37d25098-ad0c-459e-b6e7-6b11d269606b"
Jan 21 15:40:07 crc kubenswrapper[5021]: I0121 15:40:07.316435 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:07 crc kubenswrapper[5021]: I0121 15:40:07.316549 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.316559 5021 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.316646 5021 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.317955 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:11.31792952 +0000 UTC m=+952.853043419 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "metrics-server-cert" not found
Jan 21 15:40:07 crc kubenswrapper[5021]: E0121 15:40:07.317984 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:11.317975951 +0000 UTC m=+952.853089840 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "webhook-server-cert" not found
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.855323 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.857225 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.871327 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.986648 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.987292 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hczxp\" (UniqueName: \"kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:09 crc kubenswrapper[5021]: I0121 15:40:09.987804 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.089606 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.089688 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.089724 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hczxp\" (UniqueName: \"kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.090488 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.090518 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.124343 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hczxp\" (UniqueName: \"kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp\") pod \"community-operators-9tpnf\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") " pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.185391 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.496330 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:10 crc kubenswrapper[5021]: E0121 15:40:10.496547 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:10 crc kubenswrapper[5021]: E0121 15:40:10.496646 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:18.496625866 +0000 UTC m=+960.031739755 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:10 crc kubenswrapper[5021]: I0121 15:40:10.907240 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:40:10 crc kubenswrapper[5021]: E0121 15:40:10.907483 5021 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 21 15:40:10 crc kubenswrapper[5021]: E0121 15:40:10.907562 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert podName:25709945-8415-492c-a829-fd79f3fbe521 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:18.907537734 +0000 UTC m=+960.442651623 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" (UID: "25709945-8415-492c-a829-fd79f3fbe521") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 21 15:40:11 crc kubenswrapper[5021]: I0121 15:40:11.415589 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:11 crc kubenswrapper[5021]: I0121 15:40:11.415713 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:11 crc kubenswrapper[5021]: E0121 15:40:11.415852 5021 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 21 15:40:11 crc kubenswrapper[5021]: E0121 15:40:11.416048 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:19.416020722 +0000 UTC m=+960.951134611 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "webhook-server-cert" not found
Jan 21 15:40:11 crc kubenswrapper[5021]: E0121 15:40:11.415884 5021 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 21 15:40:11 crc kubenswrapper[5021]: E0121 15:40:11.416119 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs podName:727fe8cb-51ad-433f-90e1-5998b948799a nodeName:}" failed. No retries permitted until 2026-01-21 15:40:19.416100824 +0000 UTC m=+960.951214713 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs") pod "openstack-operator-controller-manager-58495d798b-lxjdg" (UID: "727fe8cb-51ad-433f-90e1-5998b948799a") : secret "metrics-server-cert" not found
Jan 21 15:40:12 crc kubenswrapper[5021]: I0121 15:40:12.357179 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:40:12 crc kubenswrapper[5021]: I0121 15:40:12.357525 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:40:18 crc kubenswrapper[5021]: I0121 15:40:18.532054 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:18 crc kubenswrapper[5021]: E0121 15:40:18.532302 5021 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:18 crc kubenswrapper[5021]: E0121 15:40:18.532775 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert podName:696e3c0f-78c0-4517-8def-49fbe8728f48 nodeName:}" failed. No retries permitted until 2026-01-21 15:40:34.532724567 +0000 UTC m=+976.067838456 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert") pod "infra-operator-controller-manager-77c48c7859-njwdd" (UID: "696e3c0f-78c0-4517-8def-49fbe8728f48") : secret "infra-operator-webhook-server-cert" not found
Jan 21 15:40:18 crc kubenswrapper[5021]: E0121 15:40:18.711921 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028"
Jan 21 15:40:18 crc kubenswrapper[5021]: E0121 15:40:18.712274 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n4x75,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-c6994669c-qswjh_openstack-operators(8e5df137-5a39-434e-9ed8-cd984d3cfecb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:18 crc kubenswrapper[5021]: E0121 15:40:18.713445 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" podUID="8e5df137-5a39-434e-9ed8-cd984d3cfecb"
Jan 21 15:40:18 crc kubenswrapper[5021]: I0121 15:40:18.940562 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:40:18 crc kubenswrapper[5021]: I0121 15:40:18.962397 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/25709945-8415-492c-a829-fd79f3fbe521-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6\" (UID: \"25709945-8415-492c-a829-fd79f3fbe521\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:40:19 crc kubenswrapper[5021]: E0121 15:40:19.132575 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028\\\"\"" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" podUID="8e5df137-5a39-434e-9ed8-cd984d3cfecb"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.143062 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-2n6cl"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.151595 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:40:19 crc kubenswrapper[5021]: E0121 15:40:19.317291 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c"
Jan 21 15:40:19 crc kubenswrapper[5021]: E0121 15:40:19.317492 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hk7fz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-cb4666565-jk57l_openstack-operators(dd66a6c4-dee4-4079-ac1c-d838cc27f752): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:19 crc kubenswrapper[5021]: E0121 15:40:19.318713 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" podUID="dd66a6c4-dee4-4079-ac1c-d838cc27f752"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.447704 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.447819 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.453488 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-metrics-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.466044 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/727fe8cb-51ad-433f-90e1-5998b948799a-webhook-certs\") pod \"openstack-operator-controller-manager-58495d798b-lxjdg\" (UID: \"727fe8cb-51ad-433f-90e1-5998b948799a\") " pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.633791 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ntddp"
Jan 21 15:40:19 crc kubenswrapper[5021]: I0121 15:40:19.641881 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.140458 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" podUID="dd66a6c4-dee4-4079-ac1c-d838cc27f752"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.345296 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.345498 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4kctg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-c87fff755-tr25w_openstack-operators(4180df4a-4632-4c29-b5cf-a597b93d4541): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.346831 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" podUID="4180df4a-4632-4c29-b5cf-a597b93d4541"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.936151 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.936340 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2w6lp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7ddb5c749-x4l62_openstack-operators(724463d5-2779-4504-bbd1-4c12353a665c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:20 crc kubenswrapper[5021]: E0121 15:40:20.937565 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" podUID="724463d5-2779-4504-bbd1-4c12353a665c"
Jan 21 15:40:21 crc kubenswrapper[5021]: E0121 15:40:21.145842 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:ff0b6c27e2d96afccd73fbbb5b5297a3f60c7f4f1dfd2a877152466697018d71\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" podUID="4180df4a-4632-4c29-b5cf-a597b93d4541"
Jan 21 15:40:21 crc kubenswrapper[5021]: E0121 15:40:21.146334 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" podUID="724463d5-2779-4504-bbd1-4c12353a665c"
Jan 21 15:40:21 crc kubenswrapper[5021]: E0121 15:40:21.571496 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e"
Jan 21 15:40:21 crc kubenswrapper[5021]: E0121 15:40:21.571719 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sn6hx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-767fdc4f47-wj9sz_openstack-operators(c20dfe07-4e4b-44e0-a260-ff4958985c0c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:21 crc kubenswrapper[5021]: E0121 15:40:21.573183 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" podUID="c20dfe07-4e4b-44e0-a260-ff4958985c0c"
Jan 21 15:40:22 crc kubenswrapper[5021]: E0121 15:40:22.151991 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" podUID="c20dfe07-4e4b-44e0-a260-ff4958985c0c"
Jan 21 15:40:24 crc kubenswrapper[5021]: I0121 15:40:24.836790 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:40:27 crc kubenswrapper[5021]: I0121 15:40:27.186717 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerStarted","Data":"602ca1185b763744484531bd889d9806a103c2e8a878ef0c8216f2f615fef194"}
Jan 21 15:40:29 crc kubenswrapper[5021]: I0121 15:40:29.254452 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"]
Jan 21 15:40:29 crc kubenswrapper[5021]: I0121 15:40:29.418467 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"]
Jan 21 15:40:31 crc kubenswrapper[5021]: I0121 15:40:31.242651 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" event={"ID":"25709945-8415-492c-a829-fd79f3fbe521","Type":"ContainerStarted","Data":"edb98fcd75818b582558bee64d8b237a64126ea1446616fdc4d0d727ba71d7ac"}
Jan 21 15:40:31 crc kubenswrapper[5021]: I0121 15:40:31.245376 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" event={"ID":"6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0","Type":"ContainerStarted","Data":"93d26b8a7d18ed763b5844382ad7b5ceb964069a1a303663ba35a5964f32a48b"}
Jan 21 15:40:31 crc kubenswrapper[5021]: I0121
15:40:31.245502 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf"
Jan 21 15:40:31 crc kubenswrapper[5021]: I0121 15:40:31.246900 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" event={"ID":"727fe8cb-51ad-433f-90e1-5998b948799a","Type":"ContainerStarted","Data":"b3a635cb9ce66df620e5c95a58e091bde1f05f967afd58760f0004ac0ef995d8"}
Jan 21 15:40:31 crc kubenswrapper[5021]: I0121 15:40:31.260816 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf" podStartSLOduration=12.180587742 podStartE2EDuration="29.260798626s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.463350375 +0000 UTC m=+945.998464264" lastFinishedPulling="2026-01-21 15:40:21.543561259 +0000 UTC m=+963.078675148" observedRunningTime="2026-01-21 15:40:31.259921842 +0000 UTC m=+972.795035731" watchObservedRunningTime="2026-01-21 15:40:31.260798626 +0000 UTC m=+972.795912515"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.281246 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" event={"ID":"e0c21887-edf1-4362-b212-456e024d2cd9","Type":"ContainerStarted","Data":"b25957097b57be755eb4d5d03c20b911b9c0769cc484a9de0c23be7656584564"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.281829 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.311177 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" event={"ID":"9fdec40b-ea8d-4d5e-82ac-27b0a76f450b","Type":"ContainerStarted","Data":"b99520658daaedc70e69684908b95db246f9b077385cd04da6284a2fb0ad385e"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.313629 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" event={"ID":"00e6bc09-d424-4ed1-b62a-b6fadc7416ec","Type":"ContainerStarted","Data":"c26e8b12d47762e4d458ddb2aa7eb457e85832e5026ffc0e58cb6f7c230e3932"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.314490 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.342611 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" event={"ID":"48921cb7-8983-4b8e-87cd-3316190ede3e","Type":"ContainerStarted","Data":"f69f78cd07fa26aebce79f2ba217fc1ec1837c7d64ff544521867cf052758baf"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.343471 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h" podStartSLOduration=6.738053582 podStartE2EDuration="30.34345383s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.252743143 +0000 UTC m=+946.787857032" lastFinishedPulling="2026-01-21 15:40:28.858143391 +0000 UTC m=+970.393257280" observedRunningTime="2026-01-21 15:40:32.341455115 +0000 UTC m=+973.876569004" watchObservedRunningTime="2026-01-21 15:40:32.34345383 +0000 UTC m=+973.878567719"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.343699 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.346370 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" event={"ID":"25190772-2e7e-4e99-9df6-727b970a7930","Type":"ContainerStarted","Data":"49274ec49f4ce008fa5853c6a5a2ba6583522a70295178fbb06c4c86cd566b1f"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.347038 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.357845 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" event={"ID":"f9c1fef7-2823-4ebc-866a-adea991f6b5c","Type":"ContainerStarted","Data":"2a2f98856de57aba827defdcae8e887e2e1e2b075bf9e25b9b8e760abbf92428"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.358364 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.398729 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46" podStartSLOduration=13.685942476 podStartE2EDuration="30.398706382s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.829716664 +0000 UTC m=+946.364830553" lastFinishedPulling="2026-01-21 15:40:21.54248057 +0000 UTC m=+963.077594459" observedRunningTime="2026-01-21 15:40:32.370380267 +0000 UTC m=+973.905494156" watchObservedRunningTime="2026-01-21 15:40:32.398706382 +0000 UTC m=+973.933820271"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.403523 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" event={"ID":"727fe8cb-51ad-433f-90e1-5998b948799a","Type":"ContainerStarted","Data":"f0dc357e15b5f4dd6364ea3c6ff2060ffd9445eaf60613e3e119acc1ed4e2530"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.404167 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.410713 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8" podStartSLOduration=11.867559484000001 podStartE2EDuration="30.41069849s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.810287442 +0000 UTC m=+946.345401331" lastFinishedPulling="2026-01-21 15:40:23.353426448 +0000 UTC m=+964.888540337" observedRunningTime="2026-01-21 15:40:32.409697773 +0000 UTC m=+973.944811672" watchObservedRunningTime="2026-01-21 15:40:32.41069849 +0000 UTC m=+973.945812369"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.429163 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" event={"ID":"7645cfbe-28a4-4098-af64-3be341c2306f","Type":"ContainerStarted","Data":"8496021fe7e5d2a44e24b39f3a6359c7282b9fafd88cfad7f079a74175981c50"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.430246 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.443549 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" event={"ID":"569260c0-7874-41fa-9114-66643a79cdfe","Type":"ContainerStarted","Data":"3d91e41ed78a6abd524bc703fd3456a4e9fa5d76f175d4118454a5316201b6b9"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.444563 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.449418 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7" podStartSLOduration=13.009800039 podStartE2EDuration="30.449390779s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.908610463 +0000 UTC m=+946.443724352" lastFinishedPulling="2026-01-21 15:40:22.348201193 +0000 UTC m=+963.883315092" observedRunningTime="2026-01-21 15:40:32.44064396 +0000 UTC m=+973.975757859" watchObservedRunningTime="2026-01-21 15:40:32.449390779 +0000 UTC m=+973.984504668"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.460797 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" event={"ID":"fcd8d00d-0a93-400d-8c23-eb51dbf56a35","Type":"ContainerStarted","Data":"ef8123edf3a0167b50a38deef8b5dc03dc626e311d195a8dd1c83d2e36cae90f"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.460842 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.483365 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" event={"ID":"0ca88d8a-abb8-498b-9588-376e3cc3a49e","Type":"ContainerStarted","Data":"52cefcfa8d6b53dc3c707107d6b02437be2e48160def408d305fdc762fae4b57"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.483979 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.492690 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t" podStartSLOduration=4.788471908 podStartE2EDuration="30.492665283s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.231214344 +0000 UTC m=+946.766328233" lastFinishedPulling="2026-01-21 15:40:30.935407719 +0000 UTC m=+972.470521608" observedRunningTime="2026-01-21 15:40:32.483560745 +0000 UTC m=+974.018674634" watchObservedRunningTime="2026-01-21 15:40:32.492665283 +0000 UTC m=+974.027779172"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.504321 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" event={"ID":"33f53fd4-7cbc-4e1e-a72a-e48eee9ca274","Type":"ContainerStarted","Data":"94f2f10751b24fe2d09a91b010113cd93a88e9c840d3c344b27b2b2ff8ab1c6e"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.505437 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.527309 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" event={"ID":"46f232dd-a469-4c74-b456-ba1b8f80b32a","Type":"ContainerStarted","Data":"4b0e5f62726decca545011350c2aca9dbf9cc5e7ce1918e2e11c9c89a9b90cb1"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.528227 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.529546 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp" podStartSLOduration=6.882824255 podStartE2EDuration="30.529528713s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.211298339 +0000 UTC m=+946.746412228" lastFinishedPulling="2026-01-21 15:40:28.858002797 +0000 UTC m=+970.393116686" observedRunningTime="2026-01-21 15:40:32.527405814 +0000 UTC m=+974.062519703" watchObservedRunningTime="2026-01-21 15:40:32.529528713 +0000 UTC m=+974.064642602"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.563235 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" event={"ID":"794724a4-8705-4860-a126-6baefc733a24","Type":"ContainerStarted","Data":"76ca642f9a8e31824c1cf38313477f929588ac7d7ebbda12ec29b38ac736f927"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.564792 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.590626 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" event={"ID":"37d25098-ad0c-459e-b6e7-6b11d269606b","Type":"ContainerStarted","Data":"6c91dbb01d0cf0b2e9dc10b824a168f363c8cee231de5cc00ef025205a651258"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.603663 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg" podStartSLOduration=29.603644752 podStartE2EDuration="29.603644752s" podCreationTimestamp="2026-01-21 15:40:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:40:32.603395974 +0000 UTC m=+974.138509863" watchObservedRunningTime="2026-01-21 15:40:32.603644752 +0000 UTC m=+974.138758641"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.615995 5021 generic.go:334] "Generic (PLEG): container finished" podID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerID="de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e" exitCode=0
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.616559 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerDied","Data":"de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e"}
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.660355 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx" podStartSLOduration=13.184601373 podStartE2EDuration="30.660337293s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.871858437 +0000 UTC m=+946.406972326" lastFinishedPulling="2026-01-21 15:40:22.347594367 +0000 UTC m=+963.882708246" observedRunningTime="2026-01-21 15:40:32.659345056 +0000 UTC m=+974.194458955" watchObservedRunningTime="2026-01-21 15:40:32.660337293 +0000 UTC m=+974.195451182"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.710012 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6" podStartSLOduration=5.040821375 podStartE2EDuration="30.709986023s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.252796444 +0000 UTC m=+946.787910333" lastFinishedPulling="2026-01-21 15:40:30.921961092 +0000 UTC m=+972.457074981" observedRunningTime="2026-01-21 15:40:32.70515863 +0000 UTC m=+974.240272519" watchObservedRunningTime="2026-01-21 15:40:32.709986023 +0000 UTC m=+974.245099922"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.768662 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm" podStartSLOduration=5.0983630699999996 podStartE2EDuration="30.768642607s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.252494716 +0000 UTC m=+946.787608605" lastFinishedPulling="2026-01-21 15:40:30.922774253 +0000 UTC m=+972.457888142" observedRunningTime="2026-01-21 15:40:32.763875517 +0000 UTC m=+974.298989406" watchObservedRunningTime="2026-01-21 15:40:32.768642607 +0000 UTC m=+974.303756496"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.781407 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-x6zvw" podStartSLOduration=3.9234691440000002 podStartE2EDuration="29.781386937s" podCreationTimestamp="2026-01-21 15:40:03 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.256521076 +0000 UTC m=+946.791634965" lastFinishedPulling="2026-01-21 15:40:31.114438869 +0000 UTC m=+972.649552758" observedRunningTime="2026-01-21 15:40:32.740369914 +0000 UTC m=+974.275483803" watchObservedRunningTime="2026-01-21 15:40:32.781386937 +0000 UTC m=+974.316500826"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.792371 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8" podStartSLOduration=13.613433742 podStartE2EDuration="30.792348566s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.16932527 +0000 UTC m=+946.704439159" lastFinishedPulling="2026-01-21 15:40:22.348240084 +0000 UTC m=+963.883353983" observedRunningTime="2026-01-21 15:40:32.790926427 +0000 UTC m=+974.326040316" watchObservedRunningTime="2026-01-21 15:40:32.792348566 +0000 UTC m=+974.327462455"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.883294 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx" podStartSLOduration=12.974785471 podStartE2EDuration="30.883255034s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.43972523 +0000 UTC m=+945.974839119" lastFinishedPulling="2026-01-21 15:40:22.348194783 +0000 UTC m=+963.883308682" observedRunningTime="2026-01-21 15:40:32.838096449 +0000 UTC m=+974.373210338" watchObservedRunningTime="2026-01-21 15:40:32.883255034 +0000 UTC m=+974.418368923"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.885245 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl" podStartSLOduration=14.590857764999999 podStartE2EDuration="30.885233379s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.248085925 +0000 UTC m=+946.783199814" lastFinishedPulling="2026-01-21 15:40:21.542461539 +0000 UTC m=+963.077575428" observedRunningTime="2026-01-21 15:40:32.879891073 +0000 UTC m=+974.415004972" watchObservedRunningTime="2026-01-21 15:40:32.885233379 +0000 UTC m=+974.420347268"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.914735 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj" podStartSLOduration=13.834684758 podStartE2EDuration="30.914712526s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.463369076 +0000 UTC m=+945.998482965" lastFinishedPulling="2026-01-21 15:40:21.543396844 +0000 UTC m=+963.078510733" observedRunningTime="2026-01-21 15:40:32.912254049 +0000 UTC m=+974.447367938" watchObservedRunningTime="2026-01-21 15:40:32.914712526 +0000 UTC m=+974.449826415"
Jan 21 15:40:32 crc kubenswrapper[5021]: I0121 15:40:32.944044 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.626871 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" event={"ID":"8e5df137-5a39-434e-9ed8-cd984d3cfecb","Type":"ContainerStarted","Data":"d1536d273850565b1eefe07eed182e4882491b6095a82c90726faa81b844001d"}
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.627512 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.629090 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" event={"ID":"4180df4a-4632-4c29-b5cf-a597b93d4541","Type":"ContainerStarted","Data":"295eee155db18226132e7a6d79250891801f5ff40df4dad0c81c60a90e2b03a0"}
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.629261 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.631136 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" event={"ID":"dd66a6c4-dee4-4079-ac1c-d838cc27f752","Type":"ContainerStarted","Data":"36bd9a526dd819528dbf75af2bb2454d753662231358816ed5b3717c3fba2777"}
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.631264 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.634271 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerStarted","Data":"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"}
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.655607 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh" podStartSLOduration=4.123222229 podStartE2EDuration="31.655585955s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.869978966 +0000 UTC m=+946.405092855" lastFinishedPulling="2026-01-21 15:40:32.402342692 +0000 UTC m=+973.937456581" observedRunningTime="2026-01-21 15:40:33.649073526 +0000 UTC m=+975.184187435" watchObservedRunningTime="2026-01-21 15:40:33.655585955 +0000 UTC m=+975.190699844"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.670855 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w" podStartSLOduration=3.223527304 podStartE2EDuration="31.670837192s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.810835428 +0000 UTC m=+946.345949317" lastFinishedPulling="2026-01-21 15:40:33.258145316 +0000 UTC m=+974.793259205" observedRunningTime="2026-01-21 15:40:33.668507848 +0000 UTC m=+975.203621737" watchObservedRunningTime="2026-01-21 15:40:33.670837192 +0000 UTC m=+975.205951081"
Jan 21 15:40:33 crc kubenswrapper[5021]: I0121 15:40:33.730290 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l" podStartSLOduration=3.723033275 podStartE2EDuration="31.730271379s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:05.180230048 +0000 UTC m=+946.715343937" lastFinishedPulling="2026-01-21 15:40:33.187468142 +0000 UTC m=+974.722582041" observedRunningTime="2026-01-21 15:40:33.729333124 +0000 UTC m=+975.264447013" watchObservedRunningTime="2026-01-21 15:40:33.730271379 +0000 UTC m=+975.265385268"
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.567860 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.582971 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/696e3c0f-78c0-4517-8def-49fbe8728f48-cert\") pod \"infra-operator-controller-manager-77c48c7859-njwdd\" (UID: \"696e3c0f-78c0-4517-8def-49fbe8728f48\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.641148 5021 generic.go:334] "Generic (PLEG): container finished" podID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerID="86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3" exitCode=0
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.641241 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerDied","Data":"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"}
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.680711 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-sv5l9"
Jan 21 15:40:34 crc kubenswrapper[5021]: I0121 15:40:34.689328 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:39 crc kubenswrapper[5021]: I0121 15:40:39.650446 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-58495d798b-lxjdg"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.356600 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.356840 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.356882 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.357497 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.357553 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859" gracePeriod=600
Jan 21 15:40:42 crc kubenswrapper[5021]: E0121 15:40:42.495338 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd40ced49_d7c3_4d2a_ba74_85d60a4cf3b1.slice/crio-2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859.scope\": RecentStats: unable to find data in memory cache]"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.598126 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-rkltx"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.606922 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-9f958b845-tjspf"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.640602 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-c6994669c-qswjh"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.677678 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-zl9lj"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.710015 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-s6lsx"
Jan 21 15:40:42 crc kubenswrapper[5021]: I0121 15:40:42.941310 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-2bt46"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.068986 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-9bzn7"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.165675 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-tr25w"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.166400 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-65849867d6-xm9sm"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.215873 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-jk57l"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.528124 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-jgqdp"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.610288 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-tcnr8"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.883378 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-kmmh6"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.884192 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-96kl8"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.969962 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-r274t"
Jan 21 15:40:43 crc kubenswrapper[5021]: I0121 15:40:43.996797 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-p4nrl"
Jan 21 15:40:44 crc kubenswrapper[5021]: I0121 15:40:44.008883 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-msh7h"
Jan 21 15:40:44 crc kubenswrapper[5021]: I0121 15:40:44.723170 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
containerID="2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859" exitCode=0 Jan 21 15:40:44 crc kubenswrapper[5021]: I0121 15:40:44.723213 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859"} Jan 21 15:40:44 crc kubenswrapper[5021]: I0121 15:40:44.723248 5021 scope.go:117] "RemoveContainer" containerID="430f8628012005aeb399f1f5973bccc495f1ff35af45c0e956eec98511b34a03" Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.064711 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 15:40:54 crc kubenswrapper[5021]: E0121 15:40:54.089921 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:dae767a3ae652ffc70ba60c5bf2b5bf72c12d939353053e231b258948ededb22" Jan 21 15:40:54 crc kubenswrapper[5021]: E0121 15:40:54.090373 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:dae767a3ae652ffc70ba60c5bf2b5bf72c12d939353053e231b258948ededb22,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilomete
r-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/open
stack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-po
dified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:cur
rent-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhp6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6_openstack-operators(25709945-8415-492c-a829-fd79f3fbe521): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:40:54 crc kubenswrapper[5021]: E0121 15:40:54.093716 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" podUID="25709945-8415-492c-a829-fd79f3fbe521"
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.567240 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"]
Jan 21 15:40:54 crc kubenswrapper[5021]: W0121 15:40:54.571702 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod696e3c0f_78c0_4517_8def_49fbe8728f48.slice/crio-910d8a002887b2f69b02ea7534e0fb049a8773761f002d5a790117d8988a8fee WatchSource:0}: Error finding container 910d8a002887b2f69b02ea7534e0fb049a8773761f002d5a790117d8988a8fee: Status 404 returned error can't find the container with id 910d8a002887b2f69b02ea7534e0fb049a8773761f002d5a790117d8988a8fee
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.792140 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" event={"ID":"696e3c0f-78c0-4517-8def-49fbe8728f48","Type":"ContainerStarted","Data":"910d8a002887b2f69b02ea7534e0fb049a8773761f002d5a790117d8988a8fee"}
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.793705 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" event={"ID":"724463d5-2779-4504-bbd1-4c12353a665c","Type":"ContainerStarted","Data":"64902d97063be349a322789310b5af7ed86726447e364069fbd02b6c4411b917"}
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.794513 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62"
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.796530 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" event={"ID":"c20dfe07-4e4b-44e0-a260-ff4958985c0c","Type":"ContainerStarted","Data":"2cb2a31e4de81766f5c103dba2f0d18c75ee2207789dba85224e6a8f8a380faa"}
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.796737 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz"
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.800055 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c"}
Jan 21 15:40:54 crc kubenswrapper[5021]: E0121 15:40:54.800164 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:dae767a3ae652ffc70ba60c5bf2b5bf72c12d939353053e231b258948ededb22\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" podUID="25709945-8415-492c-a829-fd79f3fbe521"
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.816716 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62" podStartSLOduration=3.114001995 podStartE2EDuration="52.816698415s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.477692649 +0000 UTC m=+946.012806538" lastFinishedPulling="2026-01-21 15:40:54.180389049 +0000 UTC m=+995.715502958" observedRunningTime="2026-01-21 15:40:54.810009893 +0000 UTC m=+996.345123782" watchObservedRunningTime="2026-01-21 15:40:54.816698415 +0000 UTC m=+996.351812304"
Jan 21 15:40:54 crc kubenswrapper[5021]: I0121 15:40:54.849338 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz" podStartSLOduration=3.47771394 podStartE2EDuration="52.849317869s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:04.811241538 +0000 UTC m=+946.346355427" lastFinishedPulling="2026-01-21 15:40:54.182845467 +0000 UTC m=+995.717959356" observedRunningTime="2026-01-21 15:40:54.84755712 +0000 UTC m=+996.382671009" watchObservedRunningTime="2026-01-21 15:40:54.849317869 +0000 UTC m=+996.384431748"
Jan 21 15:40:55 crc kubenswrapper[5021]: I0121 15:40:55.815615 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerStarted","Data":"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"}
Jan 21 15:40:55 crc kubenswrapper[5021]: I0121 15:40:55.848232 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9tpnf" podStartSLOduration=24.701896273 podStartE2EDuration="46.84821089s" podCreationTimestamp="2026-01-21 15:40:09 +0000 UTC" firstStartedPulling="2026-01-21 15:40:32.618150749 +0000 UTC m=+974.153264638" lastFinishedPulling="2026-01-21 15:40:54.764465366 +0000 UTC m=+996.299579255" observedRunningTime="2026-01-21 15:40:55.839940404 +0000 UTC m=+997.375054303" watchObservedRunningTime="2026-01-21 15:40:55.84821089 +0000 UTC m=+997.383324779"
Jan 21 15:40:58 crc kubenswrapper[5021]: I0121 15:40:58.835644 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" event={"ID":"696e3c0f-78c0-4517-8def-49fbe8728f48","Type":"ContainerStarted","Data":"20c2269b7f44a809bca1441b1cf568e7cb753cd797cb51533670ccca28781d64"}
Jan 21 15:40:58 crc kubenswrapper[5021]: I0121 15:40:58.836566 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:40:58 crc kubenswrapper[5021]: I0121 15:40:58.872801 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd" podStartSLOduration=54.066585126 podStartE2EDuration="56.872775517s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:54.574326881 +0000 UTC m=+996.109440770" lastFinishedPulling="2026-01-21 15:40:57.380517272 +0000 UTC m=+998.915631161" observedRunningTime="2026-01-21 15:40:58.869612781 +0000 UTC m=+1000.404726670" watchObservedRunningTime="2026-01-21 15:40:58.872775517 +0000 UTC m=+1000.407889406"
Jan 21 15:41:00 crc kubenswrapper[5021]: I0121 15:41:00.185671 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:00 crc kubenswrapper[5021]: I0121 15:41:00.186060 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:00 crc kubenswrapper[5021]: I0121 15:41:00.225094 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:00 crc kubenswrapper[5021]: I0121 15:41:00.889444 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:00 crc kubenswrapper[5021]: I0121 15:41:00.945137 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:41:02 crc kubenswrapper[5021]: I0121 15:41:02.596196 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-x4l62"
Jan 21 15:41:02 crc kubenswrapper[5021]: I0121 15:41:02.862639 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9tpnf" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="registry-server" containerID="cri-o://d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b" gracePeriod=2
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.012473 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-wj9sz"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.278552 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.398532 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities\") pod \"7e230411-33c2-44ef-b28f-8833c893c0eb\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") "
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.398670 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hczxp\" (UniqueName: \"kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp\") pod \"7e230411-33c2-44ef-b28f-8833c893c0eb\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") "
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.398824 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content\") pod \"7e230411-33c2-44ef-b28f-8833c893c0eb\" (UID: \"7e230411-33c2-44ef-b28f-8833c893c0eb\") "
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.404743 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities" (OuterVolumeSpecName: "utilities") pod "7e230411-33c2-44ef-b28f-8833c893c0eb" (UID: "7e230411-33c2-44ef-b28f-8833c893c0eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.416821 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp" (OuterVolumeSpecName: "kube-api-access-hczxp") pod "7e230411-33c2-44ef-b28f-8833c893c0eb" (UID: "7e230411-33c2-44ef-b28f-8833c893c0eb"). InnerVolumeSpecName "kube-api-access-hczxp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.459512 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e230411-33c2-44ef-b28f-8833c893c0eb" (UID: "7e230411-33c2-44ef-b28f-8833c893c0eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.502072 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.502123 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hczxp\" (UniqueName: \"kubernetes.io/projected/7e230411-33c2-44ef-b28f-8833c893c0eb-kube-api-access-hczxp\") on node \"crc\" DevicePath \"\""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.502139 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e230411-33c2-44ef-b28f-8833c893c0eb-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.875226 5021 generic.go:334] "Generic (PLEG): container finished" podID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerID="d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b" exitCode=0
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.875288 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerDied","Data":"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"}
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.875324 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9tpnf" event={"ID":"7e230411-33c2-44ef-b28f-8833c893c0eb","Type":"ContainerDied","Data":"602ca1185b763744484531bd889d9806a103c2e8a878ef0c8216f2f615fef194"}
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.875344 5021 scope.go:117] "RemoveContainer" containerID="d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.875398 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9tpnf"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.901040 5021 scope.go:117] "RemoveContainer" containerID="86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.914431 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.921201 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9tpnf"]
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.941757 5021 scope.go:117] "RemoveContainer" containerID="de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.964361 5021 scope.go:117] "RemoveContainer" containerID="d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"
Jan 21 15:41:03 crc kubenswrapper[5021]: E0121 15:41:03.965414 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b\": container with ID starting with d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b not found: ID does not exist" containerID="d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.965459 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b"} err="failed to get container status \"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b\": rpc error: code = NotFound desc = could not find container \"d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b\": container with ID starting with d8c1c6a011af554208851fabd8c0745eff3580c8c6333e5e9eae35bca25a4b7b not found: ID does not exist"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.965492 5021 scope.go:117] "RemoveContainer" containerID="86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"
Jan 21 15:41:03 crc kubenswrapper[5021]: E0121 15:41:03.965880 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3\": container with ID starting with 86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3 not found: ID does not exist" containerID="86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.965926 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3"} err="failed to get container status \"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3\": rpc error: code = NotFound desc = could not find container \"86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3\": container with ID starting with 86d1060c3a13fd9fe214ca368a6dd47b1f340800081d3bde71963495c88a31b3 not found: ID does not exist"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.965941 5021 scope.go:117] "RemoveContainer" containerID="de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e"
Jan 21 15:41:03 crc kubenswrapper[5021]: E0121 15:41:03.966340 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e\": container with ID starting with de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e not found: ID does not exist" containerID="de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e"
Jan 21 15:41:03 crc kubenswrapper[5021]: I0121 15:41:03.966379 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e"} err="failed to get container status \"de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e\": rpc error: code = NotFound desc = could not find container \"de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e\": container with ID starting with de3c7f812d08c0314aeabaf6c89f0044d5c4e65775dbd03f394379ab339d6d6e not found: ID does not exist"
Jan 21 15:41:04 crc kubenswrapper[5021]: I0121 15:41:04.698120 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-njwdd"
Jan 21 15:41:04 crc kubenswrapper[5021]: I0121 15:41:04.752598 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" path="/var/lib/kubelet/pods/7e230411-33c2-44ef-b28f-8833c893c0eb/volumes"
Jan 21 15:41:08 crc kubenswrapper[5021]: I0121 15:41:08.909231 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" event={"ID":"25709945-8415-492c-a829-fd79f3fbe521","Type":"ContainerStarted","Data":"6ca31fa1fe12387c5721316746a8a3a65665186cd8739424658d476d17e67937"}
Jan 21 15:41:08 crc kubenswrapper[5021]: I0121 15:41:08.909829 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:41:08 crc kubenswrapper[5021]: I0121 15:41:08.937429 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6" podStartSLOduration=28.608908212 podStartE2EDuration="1m6.937412444s" podCreationTimestamp="2026-01-21 15:40:02 +0000 UTC" firstStartedPulling="2026-01-21 15:40:30.248073335 +0000 UTC m=+971.783187224" lastFinishedPulling="2026-01-21 15:41:08.576577567 +0000 UTC m=+1010.111691456" observedRunningTime="2026-01-21 15:41:08.934596167 +0000 UTC m=+1010.469710056" watchObservedRunningTime="2026-01-21 15:41:08.937412444 +0000 UTC m=+1010.472526333"
Jan 21 15:41:19 crc kubenswrapper[5021]: I0121 15:41:19.158996 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6"
Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.138642 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"]
Jan 21 15:41:34 crc kubenswrapper[5021]: E0121 15:41:34.139580 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="registry-server"
Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.139597 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="registry-server"
Jan 21 15:41:34 crc kubenswrapper[5021]: E0121 15:41:34.139613 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="extract-utilities"
removing container" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="extract-utilities" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.139620 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="extract-utilities" Jan 21 15:41:34 crc kubenswrapper[5021]: E0121 15:41:34.139642 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="extract-content" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.139650 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="extract-content" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.139805 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e230411-33c2-44ef-b28f-8833c893c0eb" containerName="registry-server" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.141163 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.149755 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.151235 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-vvlgf" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.151356 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.151450 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.154272 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"] Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.187661 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"] Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.189982 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.195301 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.199448 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"] Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.209395 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.209661 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdxm9\" (UniqueName: \"kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.209741 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.209762 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvqxs\" (UniqueName: \"kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.209802 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.311081 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.311157 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdxm9\" (UniqueName: \"kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.311213 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 
15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.311231 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvqxs\" (UniqueName: \"kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.311256 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.312119 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.312161 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.312223 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.332226 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdxm9\" (UniqueName: \"kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9\") pod \"dnsmasq-dns-78dd6ddcc-f7572\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.332266 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvqxs\" (UniqueName: \"kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs\") pod \"dnsmasq-dns-675f4bcbfc-b5c89\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.461669 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.514186 5021 util.go:30] "No sandbox for pod can be found. 
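
The reconciler_common.go / operation_generator.go triplets above (VerifyControllerAttachedVolume started, MountVolume started, MountVolume.SetUp succeeded) trace the kubelet volume manager's reconcile pattern: diff the desired set of volumes against what is actually mounted, and kick off one operation per missing volume, logging start and completion separately. A self-contained schematic of that diff-and-act loop (names illustrative, not the kubelet's API):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Volume:
        name: str

    def reconcile(desired: set, actual: set, mount) -> set:
        """Mount every desired-but-unmounted volume; return the new actual set."""
        for vol in desired - actual:
            mount(vol)          # asynchronous in the kubelet; synchronous here
        return actual | desired

    mounted = reconcile({Volume("config"), Volume("dns-svc")}, set(),
                        mount=lambda v: print(f"MountVolume.SetUp for {v.name}"))
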
Jan 21 15:41:34 crc kubenswrapper[5021]: I0121 15:41:34.946237 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"]
Jan 21 15:41:35 crc kubenswrapper[5021]: W0121 15:41:35.023047 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded1e48ab_1f58_4eab_8702_a9c6a226ed8d.slice/crio-e1e298273d9c6b8701ec27ed0675a57342d3cfbdfc716feb245dd2feaddaf250 WatchSource:0}: Error finding container e1e298273d9c6b8701ec27ed0675a57342d3cfbdfc716feb245dd2feaddaf250: Status 404 returned error can't find the container with id e1e298273d9c6b8701ec27ed0675a57342d3cfbdfc716feb245dd2feaddaf250
Jan 21 15:41:35 crc kubenswrapper[5021]: I0121 15:41:35.025662 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"]
Jan 21 15:41:35 crc kubenswrapper[5021]: I0121 15:41:35.109276 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" event={"ID":"41321a59-ac12-4265-b73e-55946ab813ad","Type":"ContainerStarted","Data":"0036bb7c7203555b27abc857e3491679636e18a9ea36b266c818c44391e4f4cb"}
Jan 21 15:41:35 crc kubenswrapper[5021]: I0121 15:41:35.110486 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" event={"ID":"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d","Type":"ContainerStarted","Data":"e1e298273d9c6b8701ec27ed0675a57342d3cfbdfc716feb245dd2feaddaf250"}
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.517610 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"]
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.547075 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"]
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.549678 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.554786 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"]
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.554921 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.555390 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.555474 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hkjf\" (UniqueName: \"kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.657036 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.657157 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.657176 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hkjf\" (UniqueName: \"kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.658801 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.658856 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.690319 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hkjf\" (UniqueName: \"kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf\") pod \"dnsmasq-dns-666b6646f7-j8m5p\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.867201 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p"
Jan 21 15:41:36 crc kubenswrapper[5021]: I0121 15:41:36.991404 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"]
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.021738 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"]
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.023063 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.040373 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"]
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.065896 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.066097 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ldmn\" (UniqueName: \"kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.066216 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.174929 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.175679 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ldmn\" (UniqueName: \"kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.175836 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.176126 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4"
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.176826 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.200805 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ldmn\" (UniqueName: \"kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn\") pod \"dnsmasq-dns-57d769cc4f-ldkq4\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.381106 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.546939 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"] Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.697643 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.700143 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.704366 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.704449 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.704610 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.704628 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-jtfc2" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.704761 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.705073 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.705206 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.718898 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.877754 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"] Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888393 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 
crc kubenswrapper[5021]: I0121 15:41:37.888468 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888490 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888511 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888534 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888549 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888568 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888620 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888635 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888665 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.888692 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrggg\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990138 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990188 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990226 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990250 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990276 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990339 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990360 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990396 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990432 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrggg\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg\") pod \"rabbitmq-server-0\" (UID: 
\"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990461 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.990502 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.991325 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.991774 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.991954 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.994803 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.995210 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:37 crc kubenswrapper[5021]: I0121 15:41:37.997311 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.001460 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.002872 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.008205 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.014664 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.021852 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrggg\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.052833 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " pod="openstack/rabbitmq-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.146488 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" event={"ID":"f85328a1-6c80-4408-8aa4-9be1197f3810","Type":"ContainerStarted","Data":"7ece7a94b45a62c7fa69fc4fcc435e39c9c46a2f4270daae7bc35f2cdeedec52"} Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.148213 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" event={"ID":"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561","Type":"ContainerStarted","Data":"3f17cedb01df4e5fbe5cce320c515e12aa7ba76701343000bf36c2c8d2f7a642"} Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.163222 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.164809 5021 util.go:30] "No sandbox for pod can be found. 
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175230 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175317 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-tbrcf"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175352 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175401 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175467 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175490 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.175726 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.176665 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296399 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296481 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296507 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296532 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmjw7\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296557 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296851 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.296990 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.297034 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.297069 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.297094 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.354832 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.399747 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.399944 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmjw7\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.399990 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400008 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400050 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400073 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400277 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400318 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400336 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400368 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.400446 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.402537 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.403135 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.404628 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.406986 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.408156 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.408617 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.411656 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.415675 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info\") pod
\"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.416479 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.419363 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.425891 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmjw7\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.467768 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.499307 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:41:38 crc kubenswrapper[5021]: I0121 15:41:38.895844 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:41:38 crc kubenswrapper[5021]: W0121 15:41:38.940227 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6b5666a_5cee_4b3d_9ac4_2c34962c1d9b.slice/crio-1c19d0536ad45b68d5cd75e86c8e9065109f53441517ef388a47279c31e271f0 WatchSource:0}: Error finding container 1c19d0536ad45b68d5cd75e86c8e9065109f53441517ef388a47279c31e271f0: Status 404 returned error can't find the container with id 1c19d0536ad45b68d5cd75e86c8e9065109f53441517ef388a47279c31e271f0 Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.107862 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:41:39 crc kubenswrapper[5021]: W0121 15:41:39.132466 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2dff28e1_6d0f_4a7d_8fcf_0edf26e63825.slice/crio-18eda8ac2925e2cf2b28c3175241388e4946c1f363922b567487dfc58652b6bd WatchSource:0}: Error finding container 18eda8ac2925e2cf2b28c3175241388e4946c1f363922b567487dfc58652b6bd: Status 404 returned error can't find the container with id 18eda8ac2925e2cf2b28c3175241388e4946c1f363922b567487dfc58652b6bd Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.179024 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerStarted","Data":"1c19d0536ad45b68d5cd75e86c8e9065109f53441517ef388a47279c31e271f0"} Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.188840 5021 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerStarted","Data":"18eda8ac2925e2cf2b28c3175241388e4946c1f363922b567487dfc58652b6bd"} Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.762348 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.765358 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.774307 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.775163 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.775251 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9bchp" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.778129 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.784943 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.801123 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.945768 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946162 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946191 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946207 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k4rv\" (UniqueName: \"kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946227 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc 
kubenswrapper[5021]: I0121 15:41:39.946302 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946358 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:39 crc kubenswrapper[5021]: I0121 15:41:39.946379 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049051 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049093 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049115 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049136 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k4rv\" (UniqueName: \"kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049154 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049521 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049582 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.049604 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.050140 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.050465 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.050777 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.051163 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.052616 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.056885 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.057629 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.068582 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k4rv\" (UniqueName: \"kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc 
kubenswrapper[5021]: I0121 15:41:40.099757 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") " pod="openstack/openstack-galera-0" Jan 21 15:41:40 crc kubenswrapper[5021]: I0121 15:41:40.115425 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.143266 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.147852 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.150656 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-dvmbj" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.151068 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.151387 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.156553 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.157052 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.270986 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271039 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271081 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271122 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nltwh\" (UniqueName: \"kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271153 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271173 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271209 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.271227 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.367420 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.368384 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.372541 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xv4l5" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.372866 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373046 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373492 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373673 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373711 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373739 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373793 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373846 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nltwh\" (UniqueName: \"kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373888 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.373933 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.374360 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.381904 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.382786 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.383137 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.383236 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.384519 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.397488 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.405988 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.411255 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nltwh\" (UniqueName: \"kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.429837 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.474835 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.474893 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.474967 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.475020 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs784\" (UniqueName: \"kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.475109 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs\") pod 
\"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.479219 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.577075 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.577155 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.577219 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.577273 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs784\" (UniqueName: \"kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.577311 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.578574 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.578614 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.586499 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.586631 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.596218 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xs784\" (UniqueName: \"kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784\") pod \"memcached-0\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " pod="openstack/memcached-0" Jan 21 15:41:41 crc kubenswrapper[5021]: I0121 15:41:41.777226 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.027208 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.028127 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.035398 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-xhjwp" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.037498 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.106492 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rp5n\" (UniqueName: \"kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n\") pod \"kube-state-metrics-0\" (UID: \"a0bd89c3-8fac-437e-9be8-b0703dd1be4c\") " pod="openstack/kube-state-metrics-0" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.208486 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rp5n\" (UniqueName: \"kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n\") pod \"kube-state-metrics-0\" (UID: \"a0bd89c3-8fac-437e-9be8-b0703dd1be4c\") " pod="openstack/kube-state-metrics-0" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.228969 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rp5n\" (UniqueName: \"kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n\") pod \"kube-state-metrics-0\" (UID: \"a0bd89c3-8fac-437e-9be8-b0703dd1be4c\") " pod="openstack/kube-state-metrics-0" Jan 21 15:41:43 crc kubenswrapper[5021]: I0121 15:41:43.347220 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.128194 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.129691 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.133124 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.133378 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.134368 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-27n72" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.142989 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186113 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186181 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186221 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186257 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzlln\" (UniqueName: \"kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186309 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186387 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.186415 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.198887 5021 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.200589 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.232946 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287331 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287389 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287441 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287477 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287516 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287546 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287584 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5rlq\" (UniqueName: \"kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287612 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287648 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287676 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzlln\" (UniqueName: \"kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287702 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287724 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.287772 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.288173 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.288216 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.288257 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.290059 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.296831 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle\") pod \"ovn-controller-xqkct\" (UID: 
\"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.297251 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.303282 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzlln\" (UniqueName: \"kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln\") pod \"ovn-controller-xqkct\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.389643 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.389989 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390021 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5rlq\" (UniqueName: \"kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390050 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390069 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390085 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390306 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390367 5021 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390453 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.390457 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.392567 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.406342 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5rlq\" (UniqueName: \"kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq\") pod \"ovn-controller-ovs-bk98m\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.488742 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqkct" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.517812 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.687548 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.688776 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.691405 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-l7l7w" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.691417 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.691557 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.692157 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.692235 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.707666 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795151 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795230 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795292 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795344 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795379 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795410 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nxx4\" (UniqueName: \"kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795429 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.795456 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896726 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896780 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896810 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896849 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896872 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896894 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nxx4\" (UniqueName: \"kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896941 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.896966 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 
15:41:47.897298 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.898550 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.899466 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.899498 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.902457 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.902527 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.904410 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.916557 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:47 crc kubenswrapper[5021]: I0121 15:41:47.924455 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nxx4\" (UniqueName: \"kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4\") pod \"ovsdbserver-nb-0\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:48 crc kubenswrapper[5021]: I0121 15:41:48.008756 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.183723 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.185233 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.186966 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.187526 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.187569 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.187831 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ngxfp" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.197554 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232457 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232496 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtjlz\" (UniqueName: \"kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232587 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232610 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232659 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232698 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " 
pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232714 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.232812 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.334549 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.334869 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.334944 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.334988 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335006 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335034 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335063 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335082 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjlz\" (UniqueName: 
\"kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335439 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335739 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.335801 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.336640 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.340357 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.341240 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.344589 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.358345 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjlz\" (UniqueName: \"kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.371196 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " pod="openstack/ovsdbserver-sb-0" Jan 21 15:41:50 crc kubenswrapper[5021]: I0121 15:41:50.511625 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.382080 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.382695 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdxm9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-f7572_openstack(ed1e48ab-1f58-4eab-8702-a9c6a226ed8d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.383845 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" podUID="ed1e48ab-1f58-4eab-8702-a9c6a226ed8d" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.443718 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.443926 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tvqxs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-b5c89_openstack(41321a59-ac12-4265-b73e-55946ab813ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.445193 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" podUID="41321a59-ac12-4265-b73e-55946ab813ad" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.469354 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.469527 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9ldmn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-ldkq4_openstack(3153e20e-a5d1-4e0e-a27f-8b4e63ce0561): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.470726 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" podUID="3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.501622 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.501818 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7hkjf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-j8m5p_openstack(f85328a1-6c80-4408-8aa4-9be1197f3810): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.503049 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" podUID="f85328a1-6c80-4408-8aa4-9be1197f3810" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.616488 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" podUID="3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" Jan 21 15:42:23 crc kubenswrapper[5021]: E0121 15:42:23.616522 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" podUID="f85328a1-6c80-4408-8aa4-9be1197f3810" Jan 21 15:42:24 crc kubenswrapper[5021]: E0121 15:42:24.645088 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 21 15:42:24 crc kubenswrapper[5021]: E0121 15:42:24.645469 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c 
cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vmjw7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(2dff28e1-6d0f-4a7d-8fcf-0edf26e63825): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:42:24 crc kubenswrapper[5021]: E0121 15:42:24.646997 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" Jan 21 15:42:24 crc kubenswrapper[5021]: I0121 15:42:24.955240 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:42:24 crc kubenswrapper[5021]: I0121 15:42:24.976385 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.012797 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvqxs\" (UniqueName: \"kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs\") pod \"41321a59-ac12-4265-b73e-55946ab813ad\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013232 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config\") pod \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013268 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdxm9\" (UniqueName: \"kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9\") pod \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013287 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc\") pod \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\" (UID: \"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d\") " Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config\") pod \"41321a59-ac12-4265-b73e-55946ab813ad\" (UID: \"41321a59-ac12-4265-b73e-55946ab813ad\") " Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013589 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d" (UID: "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013694 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013784 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config" (OuterVolumeSpecName: "config") pod "41321a59-ac12-4265-b73e-55946ab813ad" (UID: "41321a59-ac12-4265-b73e-55946ab813ad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.013843 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config" (OuterVolumeSpecName: "config") pod "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d" (UID: "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.017360 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9" (OuterVolumeSpecName: "kube-api-access-kdxm9") pod "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d" (UID: "ed1e48ab-1f58-4eab-8702-a9c6a226ed8d"). InnerVolumeSpecName "kube-api-access-kdxm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.017998 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs" (OuterVolumeSpecName: "kube-api-access-tvqxs") pod "41321a59-ac12-4265-b73e-55946ab813ad" (UID: "41321a59-ac12-4265-b73e-55946ab813ad"). InnerVolumeSpecName "kube-api-access-tvqxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.115688 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvqxs\" (UniqueName: \"kubernetes.io/projected/41321a59-ac12-4265-b73e-55946ab813ad-kube-api-access-tvqxs\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.115725 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.115738 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdxm9\" (UniqueName: \"kubernetes.io/projected/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d-kube-api-access-kdxm9\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.115751 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41321a59-ac12-4265-b73e-55946ab813ad-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.215433 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.228660 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.238020 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.243747 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:42:25 crc kubenswrapper[5021]: W0121 15:42:25.247187 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4184ba08_6582_4367_abd3_9e9cffb5b716.slice/crio-2b11734da378e226d33ccec38e1180b54b38a46a04d8b34460087053ace814d4 WatchSource:0}: Error finding container 2b11734da378e226d33ccec38e1180b54b38a46a04d8b34460087053ace814d4: Status 404 returned error can't find the container with id 2b11734da378e226d33ccec38e1180b54b38a46a04d8b34460087053ace814d4 Jan 21 15:42:25 crc kubenswrapper[5021]: W0121 15:42:25.265315 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1093d499_bd73_4de4_b999_a7e9835b3124.slice/crio-4256fb6bbf6e559c69ce4bf690d3197064a3e9af1240726ef8a6c3af1cb2b1de WatchSource:0}: Error finding 
container 4256fb6bbf6e559c69ce4bf690d3197064a3e9af1240726ef8a6c3af1cb2b1de: Status 404 returned error can't find the container with id 4256fb6bbf6e559c69ce4bf690d3197064a3e9af1240726ef8a6c3af1cb2b1de Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.359823 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:42:25 crc kubenswrapper[5021]: W0121 15:42:25.367177 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70ec8329_7d58_465c_9234_7e4543fe4538.slice/crio-55d6661cfbcbb9b7b4db12c0dcd78baf6444f972a3a81f237f3aa89ffba15094 WatchSource:0}: Error finding container 55d6661cfbcbb9b7b4db12c0dcd78baf6444f972a3a81f237f3aa89ffba15094: Status 404 returned error can't find the container with id 55d6661cfbcbb9b7b4db12c0dcd78baf6444f972a3a81f237f3aa89ffba15094 Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.387594 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:42:25 crc kubenswrapper[5021]: W0121 15:42:25.393730 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0bd89c3_8fac_437e_9be8_b0703dd1be4c.slice/crio-6b38cbb0c3d4bb4a828fc533cdc6efb17a8297d506b00dcac4e172012e73516d WatchSource:0}: Error finding container 6b38cbb0c3d4bb4a828fc533cdc6efb17a8297d506b00dcac4e172012e73516d: Status 404 returned error can't find the container with id 6b38cbb0c3d4bb4a828fc533cdc6efb17a8297d506b00dcac4e172012e73516d Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.640729 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerStarted","Data":"55d6661cfbcbb9b7b4db12c0dcd78baf6444f972a3a81f237f3aa89ffba15094"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.642543 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb584a2d-b396-4850-a7b5-3d827c42fe5a","Type":"ContainerStarted","Data":"34022b3320dcca12867c09d91ae9375c4ffad8abba87f069053418cbcdc45acc"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.644625 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.644613 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-b5c89" event={"ID":"41321a59-ac12-4265-b73e-55946ab813ad","Type":"ContainerDied","Data":"0036bb7c7203555b27abc857e3491679636e18a9ea36b266c818c44391e4f4cb"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.647752 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerStarted","Data":"4256fb6bbf6e559c69ce4bf690d3197064a3e9af1240726ef8a6c3af1cb2b1de"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.651762 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" event={"ID":"ed1e48ab-1f58-4eab-8702-a9c6a226ed8d","Type":"ContainerDied","Data":"e1e298273d9c6b8701ec27ed0675a57342d3cfbdfc716feb245dd2feaddaf250"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.651774 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-f7572" Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.653888 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerStarted","Data":"2b11734da378e226d33ccec38e1180b54b38a46a04d8b34460087053ace814d4"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.656245 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0bd89c3-8fac-437e-9be8-b0703dd1be4c","Type":"ContainerStarted","Data":"6b38cbb0c3d4bb4a828fc533cdc6efb17a8297d506b00dcac4e172012e73516d"} Jan 21 15:42:25 crc kubenswrapper[5021]: I0121 15:42:25.658733 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct" event={"ID":"a5d30216-0406-4ff3-a645-880381c2a661","Type":"ContainerStarted","Data":"dcb097e7c3dec28fdfc1ca170003124e5cffa2856cb794d32f925ff59d6bbdb9"} Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.124300 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:42:26 crc kubenswrapper[5021]: W0121 15:42:26.125040 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62419df2_740b_473d_8fff_9ea018a268e5.slice/crio-9533da90d4e9d5c4f271a69e4a56dba66a3d361d1ee6caf537d47476018674c4 WatchSource:0}: Error finding container 9533da90d4e9d5c4f271a69e4a56dba66a3d361d1ee6caf537d47476018674c4: Status 404 returned error can't find the container with id 9533da90d4e9d5c4f271a69e4a56dba66a3d361d1ee6caf537d47476018674c4 Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.267454 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"] Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.273329 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-b5c89"] Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.310428 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"] Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.316383 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-f7572"] Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.672338 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerStarted","Data":"9533da90d4e9d5c4f271a69e4a56dba66a3d361d1ee6caf537d47476018674c4"} Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.675536 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerStarted","Data":"d0442856928dc1e5b8f3e11f88a250e0738b0f4e137890a5b28dc4c331684638"} Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.756049 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41321a59-ac12-4265-b73e-55946ab813ad" path="/var/lib/kubelet/pods/41321a59-ac12-4265-b73e-55946ab813ad/volumes" Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.756459 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed1e48ab-1f58-4eab-8702-a9c6a226ed8d" path="/var/lib/kubelet/pods/ed1e48ab-1f58-4eab-8702-a9c6a226ed8d/volumes" Jan 21 15:42:26 crc kubenswrapper[5021]: I0121 15:42:26.756828 5021 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:42:27 crc kubenswrapper[5021]: W0121 15:42:27.515320 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06ba8703_2573_4c30_82ec_36290cf378f4.slice/crio-09e83d565c30e4beb63307b7b985ec1a19e3da74831ae43eebc55f4d1e802e09 WatchSource:0}: Error finding container 09e83d565c30e4beb63307b7b985ec1a19e3da74831ae43eebc55f4d1e802e09: Status 404 returned error can't find the container with id 09e83d565c30e4beb63307b7b985ec1a19e3da74831ae43eebc55f4d1e802e09 Jan 21 15:42:27 crc kubenswrapper[5021]: I0121 15:42:27.684176 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerStarted","Data":"09e83d565c30e4beb63307b7b985ec1a19e3da74831ae43eebc55f4d1e802e09"} Jan 21 15:42:27 crc kubenswrapper[5021]: I0121 15:42:27.686354 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerStarted","Data":"4d967a419656e209fc1a4f481f044e1e7ce77cfe738f17ca1985fd75b33cc897"} Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.151983 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.153848 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.157816 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.162291 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.336812 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.337115 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.337145 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m5kw\" (UniqueName: \"kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.337185 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 
15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.337205 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.337242 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.376001 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.427633 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.428935 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.434216 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440600 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440644 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440675 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m5kw\" (UniqueName: \"kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440715 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440736 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.440772 5021 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.442726 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.443859 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.443950 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.449621 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.450364 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.461007 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.471639 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m5kw\" (UniqueName: \"kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw\") pod \"ovn-controller-metrics-c764x\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.505857 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.542310 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8jv4\" (UniqueName: \"kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.542465 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.542512 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.542556 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.608571 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.644293 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8jv4\" (UniqueName: \"kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.644816 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.644857 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.644885 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.645815 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.646337 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.646730 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.654417 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.656422 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.659003 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.676792 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.696119 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8jv4\" (UniqueName: \"kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4\") pod \"dnsmasq-dns-6bc7876d45-vm44n\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.835780 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.857018 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.857355 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.857375 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.857488 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.857514 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ddpp\" (UniqueName: \"kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.959467 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.959562 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ddpp\" (UniqueName: \"kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.959649 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.959672 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: 
\"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.959693 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.960592 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.961178 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.961983 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.962518 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:31 crc kubenswrapper[5021]: I0121 15:42:31.980608 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ddpp\" (UniqueName: \"kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp\") pod \"dnsmasq-dns-8554648995-mmjgr\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") " pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.004939 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.064452 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.126448 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.265744 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hkjf\" (UniqueName: \"kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf\") pod \"f85328a1-6c80-4408-8aa4-9be1197f3810\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.265834 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") pod \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.265872 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config\") pod \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.265969 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ldmn\" (UniqueName: \"kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn\") pod \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\" (UID: \"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.266063 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config\") pod \"f85328a1-6c80-4408-8aa4-9be1197f3810\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.266094 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc\") pod \"f85328a1-6c80-4408-8aa4-9be1197f3810\" (UID: \"f85328a1-6c80-4408-8aa4-9be1197f3810\") " Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.266674 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config" (OuterVolumeSpecName: "config") pod "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" (UID: "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.266818 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config" (OuterVolumeSpecName: "config") pod "f85328a1-6c80-4408-8aa4-9be1197f3810" (UID: "f85328a1-6c80-4408-8aa4-9be1197f3810"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.267102 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" (UID: "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.267201 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f85328a1-6c80-4408-8aa4-9be1197f3810" (UID: "f85328a1-6c80-4408-8aa4-9be1197f3810"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.269390 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn" (OuterVolumeSpecName: "kube-api-access-9ldmn") pod "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" (UID: "3153e20e-a5d1-4e0e-a27f-8b4e63ce0561"). InnerVolumeSpecName "kube-api-access-9ldmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.269955 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf" (OuterVolumeSpecName: "kube-api-access-7hkjf") pod "f85328a1-6c80-4408-8aa4-9be1197f3810" (UID: "f85328a1-6c80-4408-8aa4-9be1197f3810"). InnerVolumeSpecName "kube-api-access-7hkjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369230 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hkjf\" (UniqueName: \"kubernetes.io/projected/f85328a1-6c80-4408-8aa4-9be1197f3810-kube-api-access-7hkjf\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369265 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369274 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369283 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ldmn\" (UniqueName: \"kubernetes.io/projected/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561-kube-api-access-9ldmn\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369292 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.369299 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f85328a1-6c80-4408-8aa4-9be1197f3810-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.738491 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.745405 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.764048 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-ldkq4" event={"ID":"3153e20e-a5d1-4e0e-a27f-8b4e63ce0561","Type":"ContainerDied","Data":"3f17cedb01df4e5fbe5cce320c515e12aa7ba76701343000bf36c2c8d2f7a642"} Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.764123 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j8m5p" event={"ID":"f85328a1-6c80-4408-8aa4-9be1197f3810","Type":"ContainerDied","Data":"7ece7a94b45a62c7fa69fc4fcc435e39c9c46a2f4270daae7bc35f2cdeedec52"} Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.839056 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"] Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.848500 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-ldkq4"] Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.882990 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"] Jan 21 15:42:32 crc kubenswrapper[5021]: I0121 15:42:32.893084 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j8m5p"] Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.107114 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.127280 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.230003 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:33 crc kubenswrapper[5021]: W0121 15:42:33.265434 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e5b839a_247f_4e67_a522_d2a316caf769.slice/crio-2f4c031f9a2707b46ec85287e0f9e29ade45f80816e0d6918b3cac1ddccd0afe WatchSource:0}: Error finding container 2f4c031f9a2707b46ec85287e0f9e29ade45f80816e0d6918b3cac1ddccd0afe: Status 404 returned error can't find the container with id 2f4c031f9a2707b46ec85287e0f9e29ade45f80816e0d6918b3cac1ddccd0afe Jan 21 15:42:33 crc kubenswrapper[5021]: W0121 15:42:33.277031 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeea70a8c_42d6_4bc4_9fdd_3629874b87cd.slice/crio-c674e86e4bebe69470a3277f78e52424e2c96ff99cce036e8729e8d65caec83a WatchSource:0}: Error finding container c674e86e4bebe69470a3277f78e52424e2c96ff99cce036e8729e8d65caec83a: Status 404 returned error can't find the container with id c674e86e4bebe69470a3277f78e52424e2c96ff99cce036e8729e8d65caec83a Jan 21 15:42:33 crc kubenswrapper[5021]: W0121 15:42:33.366141 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0018f301_49d2_4884_abf4_23b4687de8fd.slice/crio-7541a46688dbec61388faf3a31f0fd51845574fd4739138cf0ba1e00a9847012 WatchSource:0}: Error finding container 7541a46688dbec61388faf3a31f0fd51845574fd4739138cf0ba1e00a9847012: Status 404 returned error can't find the container with id 7541a46688dbec61388faf3a31f0fd51845574fd4739138cf0ba1e00a9847012 Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.753141 5021 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerStarted","Data":"8565907c6fd51c5e42e5d3b76024f497edf5f9e73fad7968004b1b553d69be4c"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.755033 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerStarted","Data":"b438938db2d41c72df61e1de39db4e6370631315aee4d17170236638d0e182e1"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.756340 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-mmjgr" event={"ID":"4e5b839a-247f-4e67-a522-d2a316caf769","Type":"ContainerStarted","Data":"2f4c031f9a2707b46ec85287e0f9e29ade45f80816e0d6918b3cac1ddccd0afe"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.757500 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerStarted","Data":"782dafa1c4c422293d7b99197b47f6a2f821efea69c75664d9a0de6a44a0cdee"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.758641 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-c764x" event={"ID":"0018f301-49d2-4884-abf4-23b4687de8fd","Type":"ContainerStarted","Data":"7541a46688dbec61388faf3a31f0fd51845574fd4739138cf0ba1e00a9847012"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.761334 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct" event={"ID":"a5d30216-0406-4ff3-a645-880381c2a661","Type":"ContainerStarted","Data":"a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.761395 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-xqkct" Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.763018 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb584a2d-b396-4850-a7b5-3d827c42fe5a","Type":"ContainerStarted","Data":"9d81dbb84baa56528fb5b7d80ccd46ca92f10dbc96d74d445172433eb0f4dc44"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.763098 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.764900 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" event={"ID":"eea70a8c-42d6-4bc4-9fdd-3629874b87cd","Type":"ContainerStarted","Data":"c674e86e4bebe69470a3277f78e52424e2c96ff99cce036e8729e8d65caec83a"} Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.803422 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=46.058753126 podStartE2EDuration="52.803403356s" podCreationTimestamp="2026-01-21 15:41:41 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.245748791 +0000 UTC m=+1086.780862680" lastFinishedPulling="2026-01-21 15:42:31.990399031 +0000 UTC m=+1093.525512910" observedRunningTime="2026-01-21 15:42:33.795551461 +0000 UTC m=+1095.330665350" watchObservedRunningTime="2026-01-21 15:42:33.803403356 +0000 UTC m=+1095.338517235" Jan 21 15:42:33 crc kubenswrapper[5021]: I0121 15:42:33.820230 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-xqkct" podStartSLOduration=39.638553984 
podStartE2EDuration="46.820208016s" podCreationTimestamp="2026-01-21 15:41:47 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.239827639 +0000 UTC m=+1086.774941528" lastFinishedPulling="2026-01-21 15:42:32.421481671 +0000 UTC m=+1093.956595560" observedRunningTime="2026-01-21 15:42:33.813550214 +0000 UTC m=+1095.348664103" watchObservedRunningTime="2026-01-21 15:42:33.820208016 +0000 UTC m=+1095.355321905" Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.755042 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3153e20e-a5d1-4e0e-a27f-8b4e63ce0561" path="/var/lib/kubelet/pods/3153e20e-a5d1-4e0e-a27f-8b4e63ce0561/volumes" Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.755474 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85328a1-6c80-4408-8aa4-9be1197f3810" path="/var/lib/kubelet/pods/f85328a1-6c80-4408-8aa4-9be1197f3810/volumes" Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.777870 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerStarted","Data":"11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.782394 5021 generic.go:334] "Generic (PLEG): container finished" podID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerID="b71f3a863178fd22ed7e36c7aa27e4f6eeebc73c78ab487fce13675c5ca4ffa6" exitCode=0 Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.782452 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" event={"ID":"eea70a8c-42d6-4bc4-9fdd-3629874b87cd","Type":"ContainerDied","Data":"b71f3a863178fd22ed7e36c7aa27e4f6eeebc73c78ab487fce13675c5ca4ffa6"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.791419 5021 generic.go:334] "Generic (PLEG): container finished" podID="06ba8703-2573-4c30-82ec-36290cf378f4" containerID="b438938db2d41c72df61e1de39db4e6370631315aee4d17170236638d0e182e1" exitCode=0 Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.791520 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerDied","Data":"b438938db2d41c72df61e1de39db4e6370631315aee4d17170236638d0e182e1"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.809839 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5b839a-247f-4e67-a522-d2a316caf769" containerID="5597c098754f20730917848d0abd2b7c5979f1b03a7d030560a5cdb07e3c024e" exitCode=0 Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.809947 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-mmjgr" event={"ID":"4e5b839a-247f-4e67-a522-d2a316caf769","Type":"ContainerDied","Data":"5597c098754f20730917848d0abd2b7c5979f1b03a7d030560a5cdb07e3c024e"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.812880 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0bd89c3-8fac-437e-9be8-b0703dd1be4c","Type":"ContainerStarted","Data":"63a56a368dac364949819e8a1463f6184ccb6e30760a5a57a555cda751e7913c"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.813315 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.815691 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerStarted","Data":"c2577e6926c2b20d5997e13950dc6cfaeb16569002c47c4ba9f66d3aaaed7055"} Jan 21 15:42:34 crc kubenswrapper[5021]: I0121 15:42:34.878884 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=43.061991933 podStartE2EDuration="51.878866643s" podCreationTimestamp="2026-01-21 15:41:43 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.396703423 +0000 UTC m=+1086.931817312" lastFinishedPulling="2026-01-21 15:42:34.213578133 +0000 UTC m=+1095.748692022" observedRunningTime="2026-01-21 15:42:34.857749075 +0000 UTC m=+1096.392862964" watchObservedRunningTime="2026-01-21 15:42:34.878866643 +0000 UTC m=+1096.413980532" Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.831710 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" event={"ID":"eea70a8c-42d6-4bc4-9fdd-3629874b87cd","Type":"ContainerStarted","Data":"508c3ddad99866bcab612599c914710f2bf2501eae6bf37b0cf31a94d541638b"} Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.832128 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.835135 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerStarted","Data":"c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204"} Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.835225 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerStarted","Data":"c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d"} Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.836120 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.837298 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-mmjgr" event={"ID":"4e5b839a-247f-4e67-a522-d2a316caf769","Type":"ContainerStarted","Data":"6b173375050768b9570ee74280e90c330022a40f605d1dda9bdaecdc281f7c57"} Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.861242 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" podStartSLOduration=3.9229439900000003 podStartE2EDuration="4.861220852s" podCreationTimestamp="2026-01-21 15:42:31 +0000 UTC" firstStartedPulling="2026-01-21 15:42:33.279899307 +0000 UTC m=+1094.815013196" lastFinishedPulling="2026-01-21 15:42:34.218176169 +0000 UTC m=+1095.753290058" observedRunningTime="2026-01-21 15:42:35.854669123 +0000 UTC m=+1097.389783022" watchObservedRunningTime="2026-01-21 15:42:35.861220852 +0000 UTC m=+1097.396334741" Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.880623 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-bk98m" podStartSLOduration=43.847262931 podStartE2EDuration="48.880607032s" podCreationTimestamp="2026-01-21 15:41:47 +0000 UTC" firstStartedPulling="2026-01-21 15:42:27.518340644 +0000 UTC m=+1089.053454533" lastFinishedPulling="2026-01-21 15:42:32.551684745 +0000 UTC m=+1094.086798634" observedRunningTime="2026-01-21 15:42:35.875817641 +0000 UTC m=+1097.410931540" 
watchObservedRunningTime="2026-01-21 15:42:35.880607032 +0000 UTC m=+1097.415720921" Jan 21 15:42:35 crc kubenswrapper[5021]: I0121 15:42:35.899864 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-mmjgr" podStartSLOduration=3.953030603 podStartE2EDuration="4.899846059s" podCreationTimestamp="2026-01-21 15:42:31 +0000 UTC" firstStartedPulling="2026-01-21 15:42:33.267987971 +0000 UTC m=+1094.803101860" lastFinishedPulling="2026-01-21 15:42:34.214803427 +0000 UTC m=+1095.749917316" observedRunningTime="2026-01-21 15:42:35.893458005 +0000 UTC m=+1097.428571894" watchObservedRunningTime="2026-01-21 15:42:35.899846059 +0000 UTC m=+1097.434959948" Jan 21 15:42:36 crc kubenswrapper[5021]: I0121 15:42:36.845505 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:42:36 crc kubenswrapper[5021]: I0121 15:42:36.845818 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:37 crc kubenswrapper[5021]: I0121 15:42:37.866185 5021 generic.go:334] "Generic (PLEG): container finished" podID="1093d499-bd73-4de4-b999-a7e9835b3124" containerID="8565907c6fd51c5e42e5d3b76024f497edf5f9e73fad7968004b1b553d69be4c" exitCode=0 Jan 21 15:42:37 crc kubenswrapper[5021]: I0121 15:42:37.866326 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerDied","Data":"8565907c6fd51c5e42e5d3b76024f497edf5f9e73fad7968004b1b553d69be4c"} Jan 21 15:42:38 crc kubenswrapper[5021]: I0121 15:42:38.875109 5021 generic.go:334] "Generic (PLEG): container finished" podID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerID="782dafa1c4c422293d7b99197b47f6a2f821efea69c75664d9a0de6a44a0cdee" exitCode=0 Jan 21 15:42:38 crc kubenswrapper[5021]: I0121 15:42:38.875149 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerDied","Data":"782dafa1c4c422293d7b99197b47f6a2f821efea69c75664d9a0de6a44a0cdee"} Jan 21 15:42:39 crc kubenswrapper[5021]: I0121 15:42:39.889181 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerStarted","Data":"4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e"} Jan 21 15:42:39 crc kubenswrapper[5021]: I0121 15:42:39.899764 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerStarted","Data":"de590a6c44256e84fcc664627be518c9fb2c460d8c59cdb3123cc99eebe47520"} Jan 21 15:42:39 crc kubenswrapper[5021]: I0121 15:42:39.935780 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=52.805076853 podStartE2EDuration="59.93575944s" podCreationTimestamp="2026-01-21 15:41:40 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.267794254 +0000 UTC m=+1086.802908143" lastFinishedPulling="2026-01-21 15:42:32.398476841 +0000 UTC m=+1093.933590730" observedRunningTime="2026-01-21 15:42:39.930864195 +0000 UTC m=+1101.465978094" watchObservedRunningTime="2026-01-21 15:42:39.93575944 +0000 UTC m=+1101.470873329" Jan 21 15:42:39 crc kubenswrapper[5021]: I0121 15:42:39.937846 5021 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/openstack-galera-0" podStartSLOduration=55.142744814 podStartE2EDuration="1m1.937835496s" podCreationTimestamp="2026-01-21 15:41:38 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.252603198 +0000 UTC m=+1086.787717087" lastFinishedPulling="2026-01-21 15:42:32.04769389 +0000 UTC m=+1093.582807769" observedRunningTime="2026-01-21 15:42:39.91203067 +0000 UTC m=+1101.447144569" watchObservedRunningTime="2026-01-21 15:42:39.937835496 +0000 UTC m=+1101.472949385" Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.116601 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.116660 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.908297 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerStarted","Data":"be5a47b27b20c5b0ab887ff6ead17e4f4a7b05b5ba87488bbbbe349813177d79"} Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.909783 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-c764x" event={"ID":"0018f301-49d2-4884-abf4-23b4687de8fd","Type":"ContainerStarted","Data":"68cab115728d1c091f3a993f8259c122399204e30f7f18e3a2170fa8fd30b98f"} Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.911667 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerStarted","Data":"00dc1a70c5582842a5b18750882608ff55e28ecac0f1421cc5d2e9d1a3cd1b00"} Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.929421 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=41.200573647 podStartE2EDuration="54.929402116s" podCreationTimestamp="2026-01-21 15:41:46 +0000 UTC" firstStartedPulling="2026-01-21 15:42:26.127737112 +0000 UTC m=+1087.662851001" lastFinishedPulling="2026-01-21 15:42:39.856565571 +0000 UTC m=+1101.391679470" observedRunningTime="2026-01-21 15:42:40.923837274 +0000 UTC m=+1102.458951183" watchObservedRunningTime="2026-01-21 15:42:40.929402116 +0000 UTC m=+1102.464516005" Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.949815 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=37.363059113 podStartE2EDuration="51.949792965s" podCreationTimestamp="2026-01-21 15:41:49 +0000 UTC" firstStartedPulling="2026-01-21 15:42:25.370159426 +0000 UTC m=+1086.905273315" lastFinishedPulling="2026-01-21 15:42:39.956893278 +0000 UTC m=+1101.492007167" observedRunningTime="2026-01-21 15:42:40.942339821 +0000 UTC m=+1102.477453710" watchObservedRunningTime="2026-01-21 15:42:40.949792965 +0000 UTC m=+1102.484906864" Jan 21 15:42:40 crc kubenswrapper[5021]: I0121 15:42:40.963497 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-c764x" podStartSLOduration=3.595946879 podStartE2EDuration="9.96347599s" podCreationTimestamp="2026-01-21 15:42:31 +0000 UTC" firstStartedPulling="2026-01-21 15:42:33.372648035 +0000 UTC m=+1094.907761924" lastFinishedPulling="2026-01-21 15:42:39.740177146 +0000 UTC m=+1101.275291035" observedRunningTime="2026-01-21 15:42:40.956617482 +0000 UTC m=+1102.491731371" 
watchObservedRunningTime="2026-01-21 15:42:40.96347599 +0000 UTC m=+1102.498589899" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.480252 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.480582 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.512271 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.558235 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.779301 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.837122 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.920541 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 21 15:42:41 crc kubenswrapper[5021]: I0121 15:42:41.967581 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.006197 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.010317 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.047024 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.078774 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.079060 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="dnsmasq-dns" containerID="cri-o://508c3ddad99866bcab612599c914710f2bf2501eae6bf37b0cf31a94d541638b" gracePeriod=10 Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.934427 5021 generic.go:334] "Generic (PLEG): container finished" podID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerID="508c3ddad99866bcab612599c914710f2bf2501eae6bf37b0cf31a94d541638b" exitCode=0 Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.935156 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" event={"ID":"eea70a8c-42d6-4bc4-9fdd-3629874b87cd","Type":"ContainerDied","Data":"508c3ddad99866bcab612599c914710f2bf2501eae6bf37b0cf31a94d541638b"} Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.935338 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 21 15:42:42 crc kubenswrapper[5021]: I0121 15:42:42.986448 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.125188 5021 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/ovn-northd-0"] Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.126756 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.144239 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.144505 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.144826 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.145288 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-p8nwj" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.172592 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.214258 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.269565 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.269845 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.269887 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.269920 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.269956 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.270017 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnnxm\" (UniqueName: \"kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.270040 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.368357 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"] Jan 21 15:42:43 crc kubenswrapper[5021]: E0121 15:42:43.373491 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="dnsmasq-dns" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.373726 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="dnsmasq-dns" Jan 21 15:42:43 crc kubenswrapper[5021]: E0121 15:42:43.373797 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="init" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.373864 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="init" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.374106 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" containerName="dnsmasq-dns" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.371444 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc\") pod \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.375078 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.375123 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb\") pod \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.380191 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8jv4\" (UniqueName: \"kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4\") pod \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.380942 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config\") pod \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\" (UID: \"eea70a8c-42d6-4bc4-9fdd-3629874b87cd\") " Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381309 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381471 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381597 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381683 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381791 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381864 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnnxm\" (UniqueName: \"kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.381993 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir\") pod \"ovn-northd-0\" 
(UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.383022 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.383788 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.384392 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.401770 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.402298 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4" (OuterVolumeSpecName: "kube-api-access-j8jv4") pod "eea70a8c-42d6-4bc4-9fdd-3629874b87cd" (UID: "eea70a8c-42d6-4bc4-9fdd-3629874b87cd"). InnerVolumeSpecName "kube-api-access-j8jv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.403110 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.408040 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.414858 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"] Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.447122 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnnxm\" (UniqueName: \"kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.452384 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485288 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485402 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485445 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485499 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw2ms\" (UniqueName: \"kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485571 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.485736 
5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8jv4\" (UniqueName: \"kubernetes.io/projected/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-kube-api-access-j8jv4\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.539776 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config" (OuterVolumeSpecName: "config") pod "eea70a8c-42d6-4bc4-9fdd-3629874b87cd" (UID: "eea70a8c-42d6-4bc4-9fdd-3629874b87cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.549323 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.555367 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eea70a8c-42d6-4bc4-9fdd-3629874b87cd" (UID: "eea70a8c-42d6-4bc4-9fdd-3629874b87cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.597885 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.597950 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.597981 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.598002 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw2ms\" (UniqueName: \"kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.598042 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.598081 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.598094 5021 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.598877 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.599402 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.599969 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.602896 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.605209 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eea70a8c-42d6-4bc4-9fdd-3629874b87cd" (UID: "eea70a8c-42d6-4bc4-9fdd-3629874b87cd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.634507 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw2ms\" (UniqueName: \"kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms\") pod \"dnsmasq-dns-b8fbc5445-hmcx6\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.700004 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eea70a8c-42d6-4bc4-9fdd-3629874b87cd-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.901615 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.949078 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.949548 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-vm44n" event={"ID":"eea70a8c-42d6-4bc4-9fdd-3629874b87cd","Type":"ContainerDied","Data":"c674e86e4bebe69470a3277f78e52424e2c96ff99cce036e8729e8d65caec83a"} Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.949589 5021 scope.go:117] "RemoveContainer" containerID="508c3ddad99866bcab612599c914710f2bf2501eae6bf37b0cf31a94d541638b" Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.987884 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:43 crc kubenswrapper[5021]: I0121 15:42:43.994075 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-vm44n"] Jan 21 15:42:44 crc kubenswrapper[5021]: I0121 15:42:44.747108 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eea70a8c-42d6-4bc4-9fdd-3629874b87cd" path="/var/lib/kubelet/pods/eea70a8c-42d6-4bc4-9fdd-3629874b87cd/volumes" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.638433 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.646261 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.646965 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.649853 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.650278 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.654369 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-cgg98" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.660831 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.661361 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"] Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.675424 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.831446 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.831732 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.831843 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: 
\"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.831974 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.832199 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zn7f\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934047 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934203 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zn7f\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934251 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934284 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934321 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.934985 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: E0121 15:42:45.935418 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.935456 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " 
pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: E0121 15:42:45.935465 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 21 15:42:45 crc kubenswrapper[5021]: E0121 15:42:45.935552 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:42:46.435531573 +0000 UTC m=+1107.970645562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.935498 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.954494 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zn7f\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:45 crc kubenswrapper[5021]: I0121 15:42:45.958001 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.161111 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-br7n2"] Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.162100 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.164040 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.164362 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.164638 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.197072 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-br7n2"] Jan 21 15:42:46 crc kubenswrapper[5021]: E0121 15:42:46.198069 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-llfmb ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-llfmb ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-br7n2" podUID="ce015829-0f2e-4b9b-a9d5-3d5556408eb8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.204936 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-86fx8"] Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.206680 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.212182 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-br7n2"] Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.219523 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-86fx8"] Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341386 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341452 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341471 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341490 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 
15:42:46.341642 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341739 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341775 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341803 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341831 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llfmb\" (UniqueName: \"kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341858 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.341974 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.342057 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfnnp\" (UniqueName: \"kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.342128 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " 
pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.342167 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.443720 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.443807 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.443843 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.443880 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.443957 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444002 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444025 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444067 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444102 5021 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444130 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llfmb\" (UniqueName: \"kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444168 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444199 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.445307 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: E0121 15:42:46.445318 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 21 15:42:46 crc kubenswrapper[5021]: E0121 15:42:46.445382 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 21 15:42:46 crc kubenswrapper[5021]: E0121 15:42:46.445427 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:42:47.445410489 +0000 UTC m=+1108.980524378 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.445784 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.446008 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.444237 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfnnp\" (UniqueName: \"kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.446392 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.446404 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.446477 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.446997 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.447058 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.449662 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle\") pod \"swift-ring-rebalance-86fx8\" (UID: 
\"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.450310 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.450371 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.450701 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.452559 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.461462 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.465818 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llfmb\" (UniqueName: \"kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb\") pod \"swift-ring-rebalance-br7n2\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") " pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.468287 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfnnp\" (UniqueName: \"kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp\") pod \"swift-ring-rebalance-86fx8\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.522177 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.970876 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-br7n2" Jan 21 15:42:46 crc kubenswrapper[5021]: I0121 15:42:46.983020 5021 util.go:30] "No sandbox for pod can be found. 
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160247 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160359 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160384 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160402 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llfmb\" (UniqueName: \"kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160438 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160563 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.160624 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle\") pod \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\" (UID: \"ce015829-0f2e-4b9b-a9d5-3d5556408eb8\") "
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.161431 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.161573 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts" (OuterVolumeSpecName: "scripts") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.161859 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.165161 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.167465 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb" (OuterVolumeSpecName: "kube-api-access-llfmb") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "kube-api-access-llfmb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.167494 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.167554 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "ce015829-0f2e-4b9b-a9d5-3d5556408eb8" (UID: "ce015829-0f2e-4b9b-a9d5-3d5556408eb8"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262321 5021 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262364 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262375 5021 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262386 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llfmb\" (UniqueName: \"kubernetes.io/projected/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-kube-api-access-llfmb\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262400 5021 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262413 5021 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.262424 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce015829-0f2e-4b9b-a9d5-3d5556408eb8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.465195 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0"
Jan 21 15:42:47 crc kubenswrapper[5021]: E0121 15:42:47.465415 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 21 15:42:47 crc kubenswrapper[5021]: E0121 15:42:47.465451 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 21 15:42:47 crc kubenswrapper[5021]: E0121 15:42:47.465512 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:42:49.46549355 +0000 UTC m=+1111.000607439 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found
Jan 21 15:42:47 crc kubenswrapper[5021]: I0121 15:42:47.977551 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-br7n2"
Jan 21 15:42:48 crc kubenswrapper[5021]: I0121 15:42:48.020113 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-br7n2"]
Jan 21 15:42:48 crc kubenswrapper[5021]: I0121 15:42:48.025966 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-br7n2"]
Jan 21 15:42:48 crc kubenswrapper[5021]: I0121 15:42:48.760280 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce015829-0f2e-4b9b-a9d5-3d5556408eb8" path="/var/lib/kubelet/pods/ce015829-0f2e-4b9b-a9d5-3d5556408eb8/volumes"
Jan 21 15:42:49 crc kubenswrapper[5021]: I0121 15:42:49.495779 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0"
Jan 21 15:42:49 crc kubenswrapper[5021]: E0121 15:42:49.495998 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 21 15:42:49 crc kubenswrapper[5021]: E0121 15:42:49.496034 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 21 15:42:49 crc kubenswrapper[5021]: E0121 15:42:49.496096 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:42:53.49607117 +0000 UTC m=+1115.031185049 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found
Jan 21 15:42:53 crc kubenswrapper[5021]: I0121 15:42:53.355461 5021 scope.go:117] "RemoveContainer" containerID="b71f3a863178fd22ed7e36c7aa27e4f6eeebc73c78ab487fce13675c5ca4ffa6"
Jan 21 15:42:53 crc kubenswrapper[5021]: I0121 15:42:53.563467 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0"
Jan 21 15:42:53 crc kubenswrapper[5021]: E0121 15:42:53.563646 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 21 15:42:53 crc kubenswrapper[5021]: E0121 15:42:53.563939 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 21 15:42:53 crc kubenswrapper[5021]: E0121 15:42:53.564022 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:43:01.563996526 +0000 UTC m=+1123.099110415 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found
Jan 21 15:42:53 crc kubenswrapper[5021]: I0121 15:42:53.791362 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-86fx8"]
Jan 21 15:42:54 crc kubenswrapper[5021]: I0121 15:42:54.017877 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerStarted","Data":"030cd3aad1e5e6e8c5803da74c5fba479a88a767b135c8d05eda00b80a1fb5d6"}
Jan 21 15:42:54 crc kubenswrapper[5021]: I0121 15:42:54.021018 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-86fx8" event={"ID":"8f97c362-e247-4151-b007-2b3006b50488","Type":"ContainerStarted","Data":"ce26cbf095a6fad5e3b1e1fc4459e247482fbcde46bfc6a282fda29cf17ef670"}
Jan 21 15:42:54 crc kubenswrapper[5021]: I0121 15:42:54.022524 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerStarted","Data":"c067d31a71ae2cf59c27b06623ab31d53c5bbafc2b68dca60289c51f63eb9fbd"}
Jan 21 15:42:58 crc kubenswrapper[5021]: I0121 15:42:58.057747 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerStarted","Data":"4ff75ecc9879f9ef7c2c63daca2d6a319aca60b1fdeec2f4c91577214aa4ee48"}
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.077163 5021 generic.go:334] "Generic (PLEG): container finished" podID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerID="d0442856928dc1e5b8f3e11f88a250e0738b0f4e137890a5b28dc4c331684638" exitCode=0
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.077253 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerDied","Data":"d0442856928dc1e5b8f3e11f88a250e0738b0f4e137890a5b28dc4c331684638"}
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.081461 5021 generic.go:334] "Generic (PLEG): container finished" podID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerID="4ff75ecc9879f9ef7c2c63daca2d6a319aca60b1fdeec2f4c91577214aa4ee48" exitCode=0
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.081523 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerDied","Data":"4ff75ecc9879f9ef7c2c63daca2d6a319aca60b1fdeec2f4c91577214aa4ee48"}
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.084502 5021 generic.go:334] "Generic (PLEG): container finished" podID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerID="4d967a419656e209fc1a4f481f044e1e7ce77cfe738f17ca1985fd75b33cc897" exitCode=0
Jan 21 15:43:00 crc kubenswrapper[5021]: I0121 15:43:00.084632 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerDied","Data":"4d967a419656e209fc1a4f481f044e1e7ce77cfe738f17ca1985fd75b33cc897"}
Jan 21 15:43:01 crc kubenswrapper[5021]: I0121 15:43:01.609301 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0"
Jan 21 15:43:01 crc kubenswrapper[5021]: E0121 15:43:01.609522 5021 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 21 15:43:01 crc kubenswrapper[5021]: E0121 15:43:01.609664 5021 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 21 15:43:01 crc kubenswrapper[5021]: E0121 15:43:01.609722 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift podName:c57ca8a9-e2f8-4404-b56f-649297cba618 nodeName:}" failed. No retries permitted until 2026-01-21 15:43:17.609704529 +0000 UTC m=+1139.144818418 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift") pod "swift-storage-0" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618") : configmap "swift-ring-files" not found
Jan 21 15:43:04 crc kubenswrapper[5021]: I0121 15:43:04.478840 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 21 15:43:04 crc kubenswrapper[5021]: I0121 15:43:04.554152 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.131424 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerStarted","Data":"f379b38122b79af8352111451a3c240ec9ee109be0b0e1e000c38625d2933c70"}
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.132275 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6"
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.133593 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerStarted","Data":"44ab8303cf36ed1256a72700def0f8fdb1a1e4a5f2dd2a14ca80a744759920ec"}
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.133794 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.135747 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerStarted","Data":"d55fd3560f293f7b3d5438cf1c04fd0d68375dd2c61252e90dab6e4eb53445b2"}
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.135968 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.137332 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-86fx8" event={"ID":"8f97c362-e247-4151-b007-2b3006b50488","Type":"ContainerStarted","Data":"bfac950ffa328d62ce1ff0c4ca1ef8145a00d3450ad701a49fae566a5600191f"}
Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.138833 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerStarted","Data":"55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168"}
event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerStarted","Data":"55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168"} Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.138881 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerStarted","Data":"894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd"} Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.153519 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" podStartSLOduration=24.153501389 podStartE2EDuration="24.153501389s" podCreationTimestamp="2026-01-21 15:42:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:07.150863587 +0000 UTC m=+1128.685977536" watchObservedRunningTime="2026-01-21 15:43:07.153501389 +0000 UTC m=+1128.688615278" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.174373 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=45.360746908 podStartE2EDuration="1m31.17435417s" podCreationTimestamp="2026-01-21 15:41:36 +0000 UTC" firstStartedPulling="2026-01-21 15:41:38.948645595 +0000 UTC m=+1040.483759484" lastFinishedPulling="2026-01-21 15:42:24.762252857 +0000 UTC m=+1086.297366746" observedRunningTime="2026-01-21 15:43:07.172081887 +0000 UTC m=+1128.707195766" watchObservedRunningTime="2026-01-21 15:43:07.17435417 +0000 UTC m=+1128.709468059" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.192189 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-86fx8" podStartSLOduration=9.294509563 podStartE2EDuration="21.192167548s" podCreationTimestamp="2026-01-21 15:42:46 +0000 UTC" firstStartedPulling="2026-01-21 15:42:53.793719714 +0000 UTC m=+1115.328833603" lastFinishedPulling="2026-01-21 15:43:05.691377699 +0000 UTC m=+1127.226491588" observedRunningTime="2026-01-21 15:43:07.187963853 +0000 UTC m=+1128.723077742" watchObservedRunningTime="2026-01-21 15:43:07.192167548 +0000 UTC m=+1128.727281437" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.216864 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371946.637926 podStartE2EDuration="1m30.216849173s" podCreationTimestamp="2026-01-21 15:41:37 +0000 UTC" firstStartedPulling="2026-01-21 15:41:39.151361394 +0000 UTC m=+1040.686475283" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:07.212018311 +0000 UTC m=+1128.747132200" watchObservedRunningTime="2026-01-21 15:43:07.216849173 +0000 UTC m=+1128.751963062" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.552295 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-xqkct" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" probeResult="failure" output=< Jan 21 15:43:07 crc kubenswrapper[5021]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 21 15:43:07 crc kubenswrapper[5021]: > Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.602691 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.606261 5021 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.887805 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.942441 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xqkct-config-b6sdb"] Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.943414 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.949656 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 21 15:43:07 crc kubenswrapper[5021]: I0121 15:43:07.953058 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqkct-config-b6sdb"] Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.010340 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" probeResult="failure" output=< Jan 21 15:43:08 crc kubenswrapper[5021]: wsrep_local_state_comment (Joined) differs from Synced Jan 21 15:43:08 crc kubenswrapper[5021]: > Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.054480 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.054887 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.054955 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.054992 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.055028 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.055072 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4gwz\" (UniqueName: \"kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.156411 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.156888 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157035 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157069 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157095 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157131 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4gwz\" (UniqueName: \"kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157276 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157384 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 
15:43:08.157440 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.157470 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.159404 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.181118 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=12.859337722 podStartE2EDuration="25.181098136s" podCreationTimestamp="2026-01-21 15:42:43 +0000 UTC" firstStartedPulling="2026-01-21 15:42:53.369606275 +0000 UTC m=+1114.904720164" lastFinishedPulling="2026-01-21 15:43:05.691366689 +0000 UTC m=+1127.226480578" observedRunningTime="2026-01-21 15:43:08.172038277 +0000 UTC m=+1129.707152176" watchObservedRunningTime="2026-01-21 15:43:08.181098136 +0000 UTC m=+1129.716212025" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.194649 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4gwz\" (UniqueName: \"kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz\") pod \"ovn-controller-xqkct-config-b6sdb\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.261184 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.550333 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 21 15:43:08 crc kubenswrapper[5021]: I0121 15:43:08.781057 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqkct-config-b6sdb"] Jan 21 15:43:08 crc kubenswrapper[5021]: W0121 15:43:08.788611 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde5b7a11_4f5d_48ff_8fbe_9b4334dfd6fe.slice/crio-1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c WatchSource:0}: Error finding container 1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c: Status 404 returned error can't find the container with id 1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c Jan 21 15:43:09 crc kubenswrapper[5021]: I0121 15:43:09.154592 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct-config-b6sdb" event={"ID":"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe","Type":"ContainerStarted","Data":"1ebf274ec023a6a6faddf6806722fad6f7aed2d5c92575b0db6e591fcee16298"} Jan 21 15:43:09 crc kubenswrapper[5021]: I0121 15:43:09.154934 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct-config-b6sdb" event={"ID":"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe","Type":"ContainerStarted","Data":"1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c"} Jan 21 15:43:09 crc kubenswrapper[5021]: I0121 15:43:09.174554 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-xqkct-config-b6sdb" podStartSLOduration=2.174532536 podStartE2EDuration="2.174532536s" podCreationTimestamp="2026-01-21 15:43:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:09.168658516 +0000 UTC m=+1130.703772405" watchObservedRunningTime="2026-01-21 15:43:09.174532536 +0000 UTC m=+1130.709646435" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.163746 5021 generic.go:334] "Generic (PLEG): container finished" podID="de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" containerID="1ebf274ec023a6a6faddf6806722fad6f7aed2d5c92575b0db6e591fcee16298" exitCode=0 Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.163842 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct-config-b6sdb" event={"ID":"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe","Type":"ContainerDied","Data":"1ebf274ec023a6a6faddf6806722fad6f7aed2d5c92575b0db6e591fcee16298"} Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.205344 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kblbs"] Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.206571 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.209415 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.214800 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kblbs"] Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.236246 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.294700 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.294827 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwpqz\" (UniqueName: \"kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.397104 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.397188 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwpqz\" (UniqueName: \"kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.397861 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.419548 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwpqz\" (UniqueName: \"kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz\") pod \"root-account-create-update-kblbs\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.522454 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:10 crc kubenswrapper[5021]: I0121 15:43:10.970241 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kblbs"] Jan 21 15:43:10 crc kubenswrapper[5021]: W0121 15:43:10.974234 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88f7e9ea_7874_47ee_8766_0279d2ea35b5.slice/crio-50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1 WatchSource:0}: Error finding container 50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1: Status 404 returned error can't find the container with id 50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1 Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.170863 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kblbs" event={"ID":"88f7e9ea-7874-47ee-8766-0279d2ea35b5","Type":"ContainerStarted","Data":"50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1"} Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.314887 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-ckt26"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.316194 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.332683 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-ckt26"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.419936 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgksh\" (UniqueName: \"kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.422375 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0c6d-account-create-update-glfz4"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.420017 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.424769 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.427169 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.430145 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0c6d-account-create-update-glfz4"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.525040 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfphx\" (UniqueName: \"kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.525143 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.525195 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgksh\" (UniqueName: \"kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.525229 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.526141 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.544740 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgksh\" (UniqueName: \"kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh\") pod \"keystone-db-create-ckt26\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.597146 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.617091 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-dkzj5"] Jan 21 15:43:11 crc kubenswrapper[5021]: E0121 15:43:11.617483 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" containerName="ovn-config" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.617505 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" containerName="ovn-config" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.617659 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" containerName="ovn-config" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.618224 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.626548 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.626709 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfphx\" (UniqueName: \"kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.627384 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.641153 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.646605 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dkzj5"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.650872 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfphx\" (UniqueName: \"kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx\") pod \"keystone-0c6d-account-create-update-glfz4\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.728448 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.728861 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4gwz\" (UniqueName: \"kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.728602 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.728889 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729103 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729178 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729288 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts\") pod \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\" (UID: \"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe\") " Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729517 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj76j\" (UniqueName: \"kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j\") pod \"placement-db-create-dkzj5\" (UID: 
\"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.728944 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729344 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run" (OuterVolumeSpecName: "var-run") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729627 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.729890 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts\") pod \"placement-db-create-dkzj5\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.730309 5021 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.730323 5021 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.730331 5021 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.730340 5021 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-var-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.730350 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts" (OuterVolumeSpecName: "scripts") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.733050 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz" (OuterVolumeSpecName: "kube-api-access-j4gwz") pod "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" (UID: "de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe"). InnerVolumeSpecName "kube-api-access-j4gwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.738047 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.747530 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-0608-account-create-update-rgw6j"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.784657 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0608-account-create-update-rgw6j" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.784569 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0608-account-create-update-rgw6j"] Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.787963 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.832061 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj76j\" (UniqueName: \"kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j\") pod \"placement-db-create-dkzj5\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.832159 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts\") pod \"placement-db-create-dkzj5\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.832268 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.832279 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4gwz\" (UniqueName: \"kubernetes.io/projected/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe-kube-api-access-j4gwz\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.837037 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts\") pod \"placement-db-create-dkzj5\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.852413 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj76j\" (UniqueName: \"kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j\") pod \"placement-db-create-dkzj5\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:11 crc 
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.933336 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shrn8\" (UniqueName: \"kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.933392 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.936317 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5zlsq"]
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.937395 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5zlsq"
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.941641 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dkzj5"
Jan 21 15:43:11 crc kubenswrapper[5021]: I0121 15:43:11.946721 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5zlsq"]
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.048382 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrw89\" (UniqueName: \"kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.048441 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.049281 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.049448 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.049704 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shrn8\" (UniqueName: \"kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.057932 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-fba3-account-create-update-mmtzp"]
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.059071 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-fba3-account-create-update-mmtzp"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.061321 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.068218 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shrn8\" (UniqueName: \"kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8\") pod \"placement-0608-account-create-update-rgw6j\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.072830 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-fba3-account-create-update-mmtzp"]
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.111971 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0608-account-create-update-rgw6j"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.127479 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-ckt26"]
Jan 21 15:43:12 crc kubenswrapper[5021]: W0121 15:43:12.133182 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod966fe4ae_3218_4fe1_ac33_d3731130f13a.slice/crio-532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d WatchSource:0}: Error finding container 532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d: Status 404 returned error can't find the container with id 532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.151370 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrw89\" (UniqueName: \"kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.151482 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq"
Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.151601 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th8lv\" (UniqueName: \"kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp"
\"kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.152697 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.171093 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrw89\" (UniqueName: \"kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89\") pod \"glance-db-create-5zlsq\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " pod="openstack/glance-db-create-5zlsq" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.194252 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct-config-b6sdb" event={"ID":"de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe","Type":"ContainerDied","Data":"1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c"} Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.194294 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c7ff2280ca366787a8a5987a7b5f7fd2bed57a5749ea2f03d3b1be578d19f5c" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.194569 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqkct-config-b6sdb" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.197325 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kblbs" event={"ID":"88f7e9ea-7874-47ee-8766-0279d2ea35b5","Type":"ContainerStarted","Data":"713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8"} Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.199860 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ckt26" event={"ID":"966fe4ae-3218-4fe1-ac33-d3731130f13a","Type":"ContainerStarted","Data":"532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d"} Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.229347 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-kblbs" podStartSLOduration=2.22932891 podStartE2EDuration="2.22932891s" podCreationTimestamp="2026-01-21 15:43:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:12.223873111 +0000 UTC m=+1133.758987000" watchObservedRunningTime="2026-01-21 15:43:12.22932891 +0000 UTC m=+1133.764442799" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.253438 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th8lv\" (UniqueName: \"kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.253827 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.255064 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.256075 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5zlsq" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.275853 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th8lv\" (UniqueName: \"kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv\") pod \"glance-fba3-account-create-update-mmtzp\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.297029 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0c6d-account-create-update-glfz4"] Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.312496 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqkct-config-b6sdb"] Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.324787 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xqkct-config-b6sdb"] Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.359809 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.359946 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.396266 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.426703 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dkzj5"] Jan 21 15:43:12 crc kubenswrapper[5021]: W0121 15:43:12.440510 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb88b0f54_d71c_4296_a3e8_770209fbfbc6.slice/crio-10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610 WatchSource:0}: Error finding container 10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610: Status 404 returned error can't find the container with id 10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610 Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.545876 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-xqkct" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.631023 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0608-account-create-update-rgw6j"] Jan 21 15:43:12 crc kubenswrapper[5021]: W0121 15:43:12.634375 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7d0dd68_feb3_44e7_8f06_a94cc8ce3c81.slice/crio-2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac WatchSource:0}: Error finding container 2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac: Status 404 returned error can't find the container with id 2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.757181 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe" path="/var/lib/kubelet/pods/de5b7a11-4f5d-48ff-8fbe-9b4334dfd6fe/volumes" Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.782084 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5zlsq"] Jan 21 15:43:12 crc kubenswrapper[5021]: I0121 15:43:12.945629 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-fba3-account-create-update-mmtzp"] Jan 21 15:43:12 crc kubenswrapper[5021]: W0121 15:43:12.956526 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9b18b31_e098_4b42_be98_c3d6357905d1.slice/crio-ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1 WatchSource:0}: Error finding container ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1: Status 404 returned error can't find the container with id ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1 Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.207948 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5zlsq" event={"ID":"898b9fde-4ac9-449b-9ede-ea24a67e38e9","Type":"ContainerStarted","Data":"ca85c552f36af93fb46d2259e2dbf92203a095ab2cffb4f322e61edd35822e38"} Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.208949 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dkzj5" event={"ID":"b88b0f54-d71c-4296-a3e8-770209fbfbc6","Type":"ContainerStarted","Data":"10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610"} Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.209590 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-0c6d-account-create-update-glfz4" event={"ID":"791aef8a-49df-41be-8a61-7837ae62a00a","Type":"ContainerStarted","Data":"f0e992d54cf013fe228edd2cb40eb8e5fcda055c0104553995d9d5bbad88deae"} Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.211149 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-fba3-account-create-update-mmtzp" event={"ID":"a9b18b31-e098-4b42-be98-c3d6357905d1","Type":"ContainerStarted","Data":"ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1"} Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.212459 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-rgw6j" event={"ID":"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81","Type":"ContainerStarted","Data":"2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac"} Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.903142 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.985068 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:43:13 crc kubenswrapper[5021]: I0121 15:43:13.985333 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-mmjgr" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="dnsmasq-dns" containerID="cri-o://6b173375050768b9570ee74280e90c330022a40f605d1dda9bdaecdc281f7c57" gracePeriod=10 Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.253075 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dkzj5" event={"ID":"b88b0f54-d71c-4296-a3e8-770209fbfbc6","Type":"ContainerStarted","Data":"86f743359fcd566d9693e93514f567f4069f7d3c2bd29f4a5c9c25611ca370f2"} Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.262998 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6d-account-create-update-glfz4" event={"ID":"791aef8a-49df-41be-8a61-7837ae62a00a","Type":"ContainerStarted","Data":"82a3b32b6f797feb5d696f8b9c2c893157b9cf8fc0d12d892ee901637e843282"} Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.276853 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-fba3-account-create-update-mmtzp" event={"ID":"a9b18b31-e098-4b42-be98-c3d6357905d1","Type":"ContainerStarted","Data":"9805801ced59f3c7c70da4ad1d539dbba64f039ddf070e12fe98335895ee1ee0"} Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.283576 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-dkzj5" podStartSLOduration=4.283557839 podStartE2EDuration="4.283557839s" podCreationTimestamp="2026-01-21 15:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:15.271161651 +0000 UTC m=+1136.806275560" watchObservedRunningTime="2026-01-21 15:43:15.283557839 +0000 UTC m=+1136.818671718" Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.298721 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-0c6d-account-create-update-glfz4" podStartSLOduration=4.298691713 podStartE2EDuration="4.298691713s" podCreationTimestamp="2026-01-21 15:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.298748 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5b839a-247f-4e67-a522-d2a316caf769" containerID="6b173375050768b9570ee74280e90c330022a40f605d1dda9bdaecdc281f7c57" exitCode=0
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.298776 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-mmjgr" event={"ID":"4e5b839a-247f-4e67-a522-d2a316caf769","Type":"ContainerDied","Data":"6b173375050768b9570ee74280e90c330022a40f605d1dda9bdaecdc281f7c57"}
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.309895 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-rgw6j" event={"ID":"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81","Type":"ContainerStarted","Data":"d8edd25cbef949a767e8ddceea01ada7fe08756600d6369521174db0ff452f7b"}
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.317188 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5zlsq" event={"ID":"898b9fde-4ac9-449b-9ede-ea24a67e38e9","Type":"ContainerStarted","Data":"c0b27d775f9f55733583fce8881bdf4ecef6a9bf5619a72c6e3c25574d30ebd4"}
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.323686 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-fba3-account-create-update-mmtzp" podStartSLOduration=3.323654467 podStartE2EDuration="3.323654467s" podCreationTimestamp="2026-01-21 15:43:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:15.314692312 +0000 UTC m=+1136.849806201" watchObservedRunningTime="2026-01-21 15:43:15.323654467 +0000 UTC m=+1136.858768346"
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.325863 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ckt26" event={"ID":"966fe4ae-3218-4fe1-ac33-d3731130f13a","Type":"ContainerStarted","Data":"51d07b884b949c7a2964dc2ea552d145b9ced67ceec056ed46d3fe877a277f2e"}
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.338030 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-0608-account-create-update-rgw6j" podStartSLOduration=4.338000849 podStartE2EDuration="4.338000849s" podCreationTimestamp="2026-01-21 15:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:15.331470231 +0000 UTC m=+1136.866584130" watchObservedRunningTime="2026-01-21 15:43:15.338000849 +0000 UTC m=+1136.873114738"
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.356263 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-ckt26" podStartSLOduration=4.356240148 podStartE2EDuration="4.356240148s" podCreationTimestamp="2026-01-21 15:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:15.353125333 +0000 UTC m=+1136.888239222" watchObservedRunningTime="2026-01-21 15:43:15.356240148 +0000 UTC m=+1136.891354037"
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.396698 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-5zlsq" podStartSLOduration=4.396673715 podStartE2EDuration="4.396673715s" podCreationTimestamp="2026-01-21 15:43:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:15.376870053 +0000 UTC m=+1136.911983942" watchObservedRunningTime="2026-01-21 15:43:15.396673715 +0000 UTC m=+1136.931787604"
Jan 21 15:43:15 crc kubenswrapper[5021]: E0121 15:43:15.441306 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88f7e9ea_7874_47ee_8766_0279d2ea35b5.slice/crio-713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88f7e9ea_7874_47ee_8766_0279d2ea35b5.slice/crio-conmon-713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8.scope\": RecentStats: unable to find data in memory cache]"
Jan 21 15:43:15 crc kubenswrapper[5021]: I0121 15:43:15.963788 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-mmjgr"
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.034575 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc\") pod \"4e5b839a-247f-4e67-a522-d2a316caf769\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") "
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.034706 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb\") pod \"4e5b839a-247f-4e67-a522-d2a316caf769\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") "
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.034784 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb\") pod \"4e5b839a-247f-4e67-a522-d2a316caf769\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") "
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.034826 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config\") pod \"4e5b839a-247f-4e67-a522-d2a316caf769\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") "
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.034928 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ddpp\" (UniqueName: \"kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp\") pod \"4e5b839a-247f-4e67-a522-d2a316caf769\" (UID: \"4e5b839a-247f-4e67-a522-d2a316caf769\") "
Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.043497 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp" (OuterVolumeSpecName: "kube-api-access-5ddpp") pod "4e5b839a-247f-4e67-a522-d2a316caf769" (UID: "4e5b839a-247f-4e67-a522-d2a316caf769"). InnerVolumeSpecName "kube-api-access-5ddpp". PluginName "kubernetes.io/projected", VolumeGidValue ""
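The block above is the volume manager's teardown sequence for the deleted dnsmasq-dns pod: reconciler_common.go:159 marks each UnmountVolume as started, operation_generator.go:803 reports TearDown success per volume, and reconciler_common.go:293 later confirms each volume detached on node crc. A sketch that tallies those three phases per volume so an unmount that never reaches "detached" stands out; it assumes the exact message strings above, the UniqueName layout "kubernetes.io/<plugin>/<pod-uid>-<name>", and one log record per line:

# volume_phases.py -- sketch only, not a kubelet API; assumptions as above.
import re
from collections import Counter

PHASES = {
    "started": "operationExecutor.UnmountVolume started for volume",
    "torn_down": "UnmountVolume.TearDown succeeded for volume",
    "detached": "Volume detached for volume",
}
# <plugin>/<36-char pod UID>-<volume name>, as seen in the UniqueName fields.
UNIQUE = re.compile(r'kubernetes\.io/[a-z-]+/([0-9a-f-]{36})-([A-Za-z0-9.-]+)')

def volume_phases(lines):
    """Count reconciler phases per (pod UID, volume, phase); a volume whose
    'started' count exceeds its 'detached' count never finished unmounting."""
    counts = Counter()
    for line in lines:
        for phase, marker in PHASES.items():
            if marker in line:
                m = UNIQUE.search(line)
                if m:
                    counts[(m.group(1), m.group(2), phase)] += 1
                break  # at most one record per line under our assumption
    return counts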
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.088763 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4e5b839a-247f-4e67-a522-d2a316caf769" (UID: "4e5b839a-247f-4e67-a522-d2a316caf769"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.090931 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4e5b839a-247f-4e67-a522-d2a316caf769" (UID: "4e5b839a-247f-4e67-a522-d2a316caf769"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.101746 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config" (OuterVolumeSpecName: "config") pod "4e5b839a-247f-4e67-a522-d2a316caf769" (UID: "4e5b839a-247f-4e67-a522-d2a316caf769"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.124980 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4e5b839a-247f-4e67-a522-d2a316caf769" (UID: "4e5b839a-247f-4e67-a522-d2a316caf769"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.136647 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.136688 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.136702 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ddpp\" (UniqueName: \"kubernetes.io/projected/4e5b839a-247f-4e67-a522-d2a316caf769-kube-api-access-5ddpp\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.136715 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.136727 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4e5b839a-247f-4e67-a522-d2a316caf769-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.335188 5021 generic.go:334] "Generic (PLEG): container finished" podID="966fe4ae-3218-4fe1-ac33-d3731130f13a" containerID="51d07b884b949c7a2964dc2ea552d145b9ced67ceec056ed46d3fe877a277f2e" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.335235 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ckt26" 
event={"ID":"966fe4ae-3218-4fe1-ac33-d3731130f13a","Type":"ContainerDied","Data":"51d07b884b949c7a2964dc2ea552d145b9ced67ceec056ed46d3fe877a277f2e"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.337664 5021 generic.go:334] "Generic (PLEG): container finished" podID="b88b0f54-d71c-4296-a3e8-770209fbfbc6" containerID="86f743359fcd566d9693e93514f567f4069f7d3c2bd29f4a5c9c25611ca370f2" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.337720 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dkzj5" event={"ID":"b88b0f54-d71c-4296-a3e8-770209fbfbc6","Type":"ContainerDied","Data":"86f743359fcd566d9693e93514f567f4069f7d3c2bd29f4a5c9c25611ca370f2"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.340618 5021 generic.go:334] "Generic (PLEG): container finished" podID="791aef8a-49df-41be-8a61-7837ae62a00a" containerID="82a3b32b6f797feb5d696f8b9c2c893157b9cf8fc0d12d892ee901637e843282" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.340715 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6d-account-create-update-glfz4" event={"ID":"791aef8a-49df-41be-8a61-7837ae62a00a","Type":"ContainerDied","Data":"82a3b32b6f797feb5d696f8b9c2c893157b9cf8fc0d12d892ee901637e843282"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.342305 5021 generic.go:334] "Generic (PLEG): container finished" podID="a9b18b31-e098-4b42-be98-c3d6357905d1" containerID="9805801ced59f3c7c70da4ad1d539dbba64f039ddf070e12fe98335895ee1ee0" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.342370 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-fba3-account-create-update-mmtzp" event={"ID":"a9b18b31-e098-4b42-be98-c3d6357905d1","Type":"ContainerDied","Data":"9805801ced59f3c7c70da4ad1d539dbba64f039ddf070e12fe98335895ee1ee0"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.344780 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-mmjgr" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.344777 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-mmjgr" event={"ID":"4e5b839a-247f-4e67-a522-d2a316caf769","Type":"ContainerDied","Data":"2f4c031f9a2707b46ec85287e0f9e29ade45f80816e0d6918b3cac1ddccd0afe"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.345037 5021 scope.go:117] "RemoveContainer" containerID="6b173375050768b9570ee74280e90c330022a40f605d1dda9bdaecdc281f7c57" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.346573 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" containerID="d8edd25cbef949a767e8ddceea01ada7fe08756600d6369521174db0ff452f7b" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.346605 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-rgw6j" event={"ID":"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81","Type":"ContainerDied","Data":"d8edd25cbef949a767e8ddceea01ada7fe08756600d6369521174db0ff452f7b"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.348454 5021 generic.go:334] "Generic (PLEG): container finished" podID="898b9fde-4ac9-449b-9ede-ea24a67e38e9" containerID="c0b27d775f9f55733583fce8881bdf4ecef6a9bf5619a72c6e3c25574d30ebd4" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.348520 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5zlsq" event={"ID":"898b9fde-4ac9-449b-9ede-ea24a67e38e9","Type":"ContainerDied","Data":"c0b27d775f9f55733583fce8881bdf4ecef6a9bf5619a72c6e3c25574d30ebd4"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.350507 5021 generic.go:334] "Generic (PLEG): container finished" podID="88f7e9ea-7874-47ee-8766-0279d2ea35b5" containerID="713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8" exitCode=0 Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.350565 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kblbs" event={"ID":"88f7e9ea-7874-47ee-8766-0279d2ea35b5","Type":"ContainerDied","Data":"713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8"} Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.368245 5021 scope.go:117] "RemoveContainer" containerID="5597c098754f20730917848d0abd2b7c5979f1b03a7d030560a5cdb07e3c024e" Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.512558 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.523281 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-mmjgr"] Jan 21 15:43:16 crc kubenswrapper[5021]: I0121 15:43:16.748680 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" path="/var/lib/kubelet/pods/4e5b839a-247f-4e67-a522-d2a316caf769/volumes" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.664220 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.671074 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"swift-storage-0\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " pod="openstack/swift-storage-0" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.783205 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.792353 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.867342 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfphx\" (UniqueName: \"kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx\") pod \"791aef8a-49df-41be-8a61-7837ae62a00a\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.867399 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts\") pod \"791aef8a-49df-41be-8a61-7837ae62a00a\" (UID: \"791aef8a-49df-41be-8a61-7837ae62a00a\") " Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.869324 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "791aef8a-49df-41be-8a61-7837ae62a00a" (UID: "791aef8a-49df-41be-8a61-7837ae62a00a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.876120 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx" (OuterVolumeSpecName: "kube-api-access-nfphx") pod "791aef8a-49df-41be-8a61-7837ae62a00a" (UID: "791aef8a-49df-41be-8a61-7837ae62a00a"). InnerVolumeSpecName "kube-api-access-nfphx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.966086 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5zlsq" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.970213 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfphx\" (UniqueName: \"kubernetes.io/projected/791aef8a-49df-41be-8a61-7837ae62a00a-kube-api-access-nfphx\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.970261 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/791aef8a-49df-41be-8a61-7837ae62a00a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:17 crc kubenswrapper[5021]: I0121 15:43:17.976470 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.004382 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.005999 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0608-account-create-update-rgw6j" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.029200 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.071209 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrw89\" (UniqueName: \"kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89\") pod \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.071273 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts\") pod \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\" (UID: \"898b9fde-4ac9-449b-9ede-ea24a67e38e9\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.071433 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgksh\" (UniqueName: \"kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh\") pod \"966fe4ae-3218-4fe1-ac33-d3731130f13a\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.071513 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts\") pod \"966fe4ae-3218-4fe1-ac33-d3731130f13a\" (UID: \"966fe4ae-3218-4fe1-ac33-d3731130f13a\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.071825 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "898b9fde-4ac9-449b-9ede-ea24a67e38e9" (UID: "898b9fde-4ac9-449b-9ede-ea24a67e38e9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.072284 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "966fe4ae-3218-4fe1-ac33-d3731130f13a" (UID: "966fe4ae-3218-4fe1-ac33-d3731130f13a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.080099 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89" (OuterVolumeSpecName: "kube-api-access-wrw89") pod "898b9fde-4ac9-449b-9ede-ea24a67e38e9" (UID: "898b9fde-4ac9-449b-9ede-ea24a67e38e9"). InnerVolumeSpecName "kube-api-access-wrw89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.080289 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh" (OuterVolumeSpecName: "kube-api-access-jgksh") pod "966fe4ae-3218-4fe1-ac33-d3731130f13a" (UID: "966fe4ae-3218-4fe1-ac33-d3731130f13a"). InnerVolumeSpecName "kube-api-access-jgksh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.084730 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.172790 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts\") pod \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.172855 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts\") pod \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.172890 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwpqz\" (UniqueName: \"kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz\") pod \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\" (UID: \"88f7e9ea-7874-47ee-8766-0279d2ea35b5\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.172972 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-th8lv\" (UniqueName: \"kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv\") pod \"a9b18b31-e098-4b42-be98-c3d6357905d1\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173035 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zj76j\" (UniqueName: \"kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j\") pod \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173052 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts\") pod \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\" (UID: \"b88b0f54-d71c-4296-a3e8-770209fbfbc6\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173122 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts\") pod \"a9b18b31-e098-4b42-be98-c3d6357905d1\" (UID: \"a9b18b31-e098-4b42-be98-c3d6357905d1\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173163 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shrn8\" (UniqueName: \"kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8\") pod \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\" (UID: \"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81\") " Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173495 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgksh\" (UniqueName: \"kubernetes.io/projected/966fe4ae-3218-4fe1-ac33-d3731130f13a-kube-api-access-jgksh\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173516 5021 reconciler_common.go:293] "Volume detached for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/966fe4ae-3218-4fe1-ac33-d3731130f13a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173527 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrw89\" (UniqueName: \"kubernetes.io/projected/898b9fde-4ac9-449b-9ede-ea24a67e38e9-kube-api-access-wrw89\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173538 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/898b9fde-4ac9-449b-9ede-ea24a67e38e9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173780 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88f7e9ea-7874-47ee-8766-0279d2ea35b5" (UID: "88f7e9ea-7874-47ee-8766-0279d2ea35b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173810 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b88b0f54-d71c-4296-a3e8-770209fbfbc6" (UID: "b88b0f54-d71c-4296-a3e8-770209fbfbc6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.173814 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" (UID: "a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.174370 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9b18b31-e098-4b42-be98-c3d6357905d1" (UID: "a9b18b31-e098-4b42-be98-c3d6357905d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.176216 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j" (OuterVolumeSpecName: "kube-api-access-zj76j") pod "b88b0f54-d71c-4296-a3e8-770209fbfbc6" (UID: "b88b0f54-d71c-4296-a3e8-770209fbfbc6"). InnerVolumeSpecName "kube-api-access-zj76j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.177390 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8" (OuterVolumeSpecName: "kube-api-access-shrn8") pod "a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" (UID: "a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81"). InnerVolumeSpecName "kube-api-access-shrn8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.177423 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv" (OuterVolumeSpecName: "kube-api-access-th8lv") pod "a9b18b31-e098-4b42-be98-c3d6357905d1" (UID: "a9b18b31-e098-4b42-be98-c3d6357905d1"). InnerVolumeSpecName "kube-api-access-th8lv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.178178 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz" (OuterVolumeSpecName: "kube-api-access-kwpqz") pod "88f7e9ea-7874-47ee-8766-0279d2ea35b5" (UID: "88f7e9ea-7874-47ee-8766-0279d2ea35b5"). InnerVolumeSpecName "kube-api-access-kwpqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275576 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9b18b31-e098-4b42-be98-c3d6357905d1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275610 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shrn8\" (UniqueName: \"kubernetes.io/projected/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-kube-api-access-shrn8\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275621 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275630 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88f7e9ea-7874-47ee-8766-0279d2ea35b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275638 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwpqz\" (UniqueName: \"kubernetes.io/projected/88f7e9ea-7874-47ee-8766-0279d2ea35b5-kube-api-access-kwpqz\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275646 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-th8lv\" (UniqueName: \"kubernetes.io/projected/a9b18b31-e098-4b42-be98-c3d6357905d1-kube-api-access-th8lv\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275654 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zj76j\" (UniqueName: \"kubernetes.io/projected/b88b0f54-d71c-4296-a3e8-770209fbfbc6-kube-api-access-zj76j\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.275663 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b88b0f54-d71c-4296-a3e8-770209fbfbc6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.360002 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Jan 21 15:43:18 crc kubenswrapper[5021]: 
I0121 15:43:18.379960 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dkzj5" event={"ID":"b88b0f54-d71c-4296-a3e8-770209fbfbc6","Type":"ContainerDied","Data":"10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.379999 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10e27ae23ee8ee9dea5e404a18f93548c682b9cf9acd050fb717ca786665f610" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.380057 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dkzj5" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.390434 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0c6d-account-create-update-glfz4" event={"ID":"791aef8a-49df-41be-8a61-7837ae62a00a","Type":"ContainerDied","Data":"f0e992d54cf013fe228edd2cb40eb8e5fcda055c0104553995d9d5bbad88deae"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.390481 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0e992d54cf013fe228edd2cb40eb8e5fcda055c0104553995d9d5bbad88deae" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.390555 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-glfz4" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.413237 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-fba3-account-create-update-mmtzp" event={"ID":"a9b18b31-e098-4b42-be98-c3d6357905d1","Type":"ContainerDied","Data":"ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.413284 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef2ec3e9d048c899efc853c8d05a6ee55d228e98cd06a0e272c8ba881c040fd1" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.413363 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-fba3-account-create-update-mmtzp" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.428972 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-rgw6j" event={"ID":"a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81","Type":"ContainerDied","Data":"2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.429012 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b3d8a1e6fb0280a575b7ae06e56c2da54893afda57121c3edf65581602a86ac" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.429079 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0608-account-create-update-rgw6j" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.451264 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5zlsq" event={"ID":"898b9fde-4ac9-449b-9ede-ea24a67e38e9","Type":"ContainerDied","Data":"ca85c552f36af93fb46d2259e2dbf92203a095ab2cffb4f322e61edd35822e38"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.451325 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca85c552f36af93fb46d2259e2dbf92203a095ab2cffb4f322e61edd35822e38" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.451435 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5zlsq" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.470127 5021 generic.go:334] "Generic (PLEG): container finished" podID="8f97c362-e247-4151-b007-2b3006b50488" containerID="bfac950ffa328d62ce1ff0c4ca1ef8145a00d3450ad701a49fae566a5600191f" exitCode=0 Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.470245 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-86fx8" event={"ID":"8f97c362-e247-4151-b007-2b3006b50488","Type":"ContainerDied","Data":"bfac950ffa328d62ce1ff0c4ca1ef8145a00d3450ad701a49fae566a5600191f"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.492431 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kblbs" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.499829 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kblbs" event={"ID":"88f7e9ea-7874-47ee-8766-0279d2ea35b5","Type":"ContainerDied","Data":"50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.499889 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50fd1be2deb4f981320d673348af2ac3231ef66933ebd209ac4c7572ea875ee1" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.503137 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.505926 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ckt26" event={"ID":"966fe4ae-3218-4fe1-ac33-d3731130f13a","Type":"ContainerDied","Data":"532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d"} Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.505985 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="532d2f80b0329b735c711ffd30f92c9ef082cbb2fc342e8a095e9baed8d0a14d" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.506094 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-ckt26" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.692060 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.724092 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kblbs"] Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.769479 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kblbs"] Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825059 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-fxrjc"] Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825505 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f7e9ea-7874-47ee-8766-0279d2ea35b5" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825531 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f7e9ea-7874-47ee-8766-0279d2ea35b5" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825551 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="791aef8a-49df-41be-8a61-7837ae62a00a" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825559 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="791aef8a-49df-41be-8a61-7837ae62a00a" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825574 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966fe4ae-3218-4fe1-ac33-d3731130f13a" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825581 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="966fe4ae-3218-4fe1-ac33-d3731130f13a" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825595 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="dnsmasq-dns" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825602 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="dnsmasq-dns" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825621 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="898b9fde-4ac9-449b-9ede-ea24a67e38e9" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825630 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="898b9fde-4ac9-449b-9ede-ea24a67e38e9" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825647 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="init" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825655 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="init" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825670 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b88b0f54-d71c-4296-a3e8-770209fbfbc6" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825677 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b88b0f54-d71c-4296-a3e8-770209fbfbc6" 
containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825687 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825694 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: E0121 15:43:18.825709 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9b18b31-e098-4b42-be98-c3d6357905d1" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825716 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9b18b31-e098-4b42-be98-c3d6357905d1" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825965 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b88b0f54-d71c-4296-a3e8-770209fbfbc6" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825985 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="966fe4ae-3218-4fe1-ac33-d3731130f13a" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.825997 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="898b9fde-4ac9-449b-9ede-ea24a67e38e9" containerName="mariadb-database-create" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826006 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="791aef8a-49df-41be-8a61-7837ae62a00a" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826029 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e5b839a-247f-4e67-a522-d2a316caf769" containerName="dnsmasq-dns" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826051 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="88f7e9ea-7874-47ee-8766-0279d2ea35b5" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826060 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9b18b31-e098-4b42-be98-c3d6357905d1" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826071 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" containerName="mariadb-account-create-update" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.826735 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.833387 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.839338 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.857037 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fxrjc"] Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.908045 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:18 crc kubenswrapper[5021]: I0121 15:43:18.908099 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pch6k\" (UniqueName: \"kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.009980 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.010032 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pch6k\" (UniqueName: \"kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.011092 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.029539 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pch6k\" (UniqueName: \"kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k\") pod \"root-account-create-update-fxrjc\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.187177 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.514844 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"9b5df468f1f3ee7d1a2d8b8bfc8134ea166fe80bc890b1ef035b070414f33516"} Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.649776 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-fxrjc"] Jan 21 15:43:19 crc kubenswrapper[5021]: W0121 15:43:19.654577 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8a758f3_9e27_4c1b_917e_c4a16c5f3342.slice/crio-7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6 WatchSource:0}: Error finding container 7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6: Status 404 returned error can't find the container with id 7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6 Jan 21 15:43:19 crc kubenswrapper[5021]: I0121 15:43:19.879302 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027186 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027399 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027524 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027569 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027600 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfnnp\" (UniqueName: \"kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.027648 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" 
(UniqueName: \"kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift\") pod \"8f97c362-e247-4151-b007-2b3006b50488\" (UID: \"8f97c362-e247-4151-b007-2b3006b50488\") " Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.028700 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.029048 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.033546 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp" (OuterVolumeSpecName: "kube-api-access-hfnnp") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "kube-api-access-hfnnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.037303 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.054651 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.056924 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.061405 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts" (OuterVolumeSpecName: "scripts") pod "8f97c362-e247-4151-b007-2b3006b50488" (UID: "8f97c362-e247-4151-b007-2b3006b50488"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130921 5021 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130954 5021 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130967 5021 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130976 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f97c362-e247-4151-b007-2b3006b50488-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130986 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfnnp\" (UniqueName: \"kubernetes.io/projected/8f97c362-e247-4151-b007-2b3006b50488-kube-api-access-hfnnp\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.130998 5021 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8f97c362-e247-4151-b007-2b3006b50488-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.131009 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f97c362-e247-4151-b007-2b3006b50488-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.527401 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fxrjc" event={"ID":"d8a758f3-9e27-4c1b-917e-c4a16c5f3342","Type":"ContainerStarted","Data":"e4d8cebebda28f065fc3c55292b1eafcf78e8af726bb99a63b7e36fea292dfcd"} Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.527461 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fxrjc" event={"ID":"d8a758f3-9e27-4c1b-917e-c4a16c5f3342","Type":"ContainerStarted","Data":"7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6"} Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.529022 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-86fx8" event={"ID":"8f97c362-e247-4151-b007-2b3006b50488","Type":"ContainerDied","Data":"ce26cbf095a6fad5e3b1e1fc4459e247482fbcde46bfc6a282fda29cf17ef670"} Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.529056 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce26cbf095a6fad5e3b1e1fc4459e247482fbcde46bfc6a282fda29cf17ef670" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.529076 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-86fx8" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.555128 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-fxrjc" podStartSLOduration=2.555092159 podStartE2EDuration="2.555092159s" podCreationTimestamp="2026-01-21 15:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:20.554518504 +0000 UTC m=+1142.089632393" watchObservedRunningTime="2026-01-21 15:43:20.555092159 +0000 UTC m=+1142.090206058" Jan 21 15:43:20 crc kubenswrapper[5021]: I0121 15:43:20.760290 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88f7e9ea-7874-47ee-8766-0279d2ea35b5" path="/var/lib/kubelet/pods/88f7e9ea-7874-47ee-8766-0279d2ea35b5/volumes" Jan 21 15:43:21 crc kubenswrapper[5021]: I0121 15:43:21.537793 5021 generic.go:334] "Generic (PLEG): container finished" podID="d8a758f3-9e27-4c1b-917e-c4a16c5f3342" containerID="e4d8cebebda28f065fc3c55292b1eafcf78e8af726bb99a63b7e36fea292dfcd" exitCode=0 Jan 21 15:43:21 crc kubenswrapper[5021]: I0121 15:43:21.537845 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fxrjc" event={"ID":"d8a758f3-9e27-4c1b-917e-c4a16c5f3342","Type":"ContainerDied","Data":"e4d8cebebda28f065fc3c55292b1eafcf78e8af726bb99a63b7e36fea292dfcd"} Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.180957 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-8jhv7"] Jan 21 15:43:22 crc kubenswrapper[5021]: E0121 15:43:22.181494 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f97c362-e247-4151-b007-2b3006b50488" containerName="swift-ring-rebalance" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.181519 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f97c362-e247-4151-b007-2b3006b50488" containerName="swift-ring-rebalance" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.181808 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f97c362-e247-4151-b007-2b3006b50488" containerName="swift-ring-rebalance" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.183292 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.186814 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.187053 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d767q" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.201340 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-8jhv7"] Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.265654 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.265731 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8sv4\" (UniqueName: \"kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.265924 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.266056 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.368246 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.368321 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8sv4\" (UniqueName: \"kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.368369 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.368411 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle\") pod 
\"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.375316 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.375338 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.375634 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.387417 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8sv4\" (UniqueName: \"kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4\") pod \"glance-db-sync-8jhv7\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.502584 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-8jhv7" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.880984 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.978288 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pch6k\" (UniqueName: \"kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k\") pod \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.978376 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts\") pod \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\" (UID: \"d8a758f3-9e27-4c1b-917e-c4a16c5f3342\") " Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.980130 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8a758f3-9e27-4c1b-917e-c4a16c5f3342" (UID: "d8a758f3-9e27-4c1b-917e-c4a16c5f3342"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:43:22 crc kubenswrapper[5021]: I0121 15:43:22.985286 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k" (OuterVolumeSpecName: "kube-api-access-pch6k") pod "d8a758f3-9e27-4c1b-917e-c4a16c5f3342" (UID: "d8a758f3-9e27-4c1b-917e-c4a16c5f3342"). InnerVolumeSpecName "kube-api-access-pch6k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.080217 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pch6k\" (UniqueName: \"kubernetes.io/projected/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-kube-api-access-pch6k\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.080271 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8a758f3-9e27-4c1b-917e-c4a16c5f3342-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.187603 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-8jhv7"] Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.560667 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"0d1d92941497d1a0e50ff5085e977bdf5928704f84bf870731efcc8fcb1d2f1c"} Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.560715 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"c5661f129fdffc8a3bd461399ca660bc553970556f3c6af116113c924c302646"} Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.560730 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"5801c40336a7430220f5050e7b4c6fc8997538d48d91daa78538847e03eb5b9c"} Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.562061 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-8jhv7" event={"ID":"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14","Type":"ContainerStarted","Data":"3df90f1f0b422311705dfaad4ebece1648e025159178be7dfd313298fd90619c"} Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.563369 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-fxrjc" event={"ID":"d8a758f3-9e27-4c1b-917e-c4a16c5f3342","Type":"ContainerDied","Data":"7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6"} Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.563532 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d2931740bb8f8bf49c3320803078edb2a0361c09c3a92e2163ce5ecc2b1c8c6" Jan 21 15:43:23 crc kubenswrapper[5021]: I0121 15:43:23.563424 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-fxrjc" Jan 21 15:43:24 crc kubenswrapper[5021]: I0121 15:43:24.588509 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"26edaec702317ca592975b15ba32e49f2dbd21f92807d5d36fce7823804ed53c"} Jan 21 15:43:25 crc kubenswrapper[5021]: I0121 15:43:25.258582 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-fxrjc"] Jan 21 15:43:25 crc kubenswrapper[5021]: I0121 15:43:25.287492 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-fxrjc"] Jan 21 15:43:25 crc kubenswrapper[5021]: I0121 15:43:25.599042 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"de92f672063435b72b37aaebf43b6130f273d9c86bb2fbfd7c96ca15e567638a"} Jan 21 15:43:25 crc kubenswrapper[5021]: I0121 15:43:25.599129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"6107623cd8f4072bc502c561748852925819d887d3f75272057a2e95b4ad1df7"} Jan 21 15:43:26 crc kubenswrapper[5021]: I0121 15:43:26.749770 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8a758f3-9e27-4c1b-917e-c4a16c5f3342" path="/var/lib/kubelet/pods/d8a758f3-9e27-4c1b-917e-c4a16c5f3342/volumes" Jan 21 15:43:27 crc kubenswrapper[5021]: I0121 15:43:27.620599 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"ebcb5861aa10209409b721ea6e382ae9a04e2327d3329449b46709721ed4a126"} Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.359125 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.502131 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.648838 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"1ca716b9f11f9eb3707f3cd9724e75ee4eb6224c4c1e84903f22f728f45b5a6e"} Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.758056 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-44zpx"] Jan 21 15:43:28 crc kubenswrapper[5021]: E0121 15:43:28.758335 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a758f3-9e27-4c1b-917e-c4a16c5f3342" containerName="mariadb-account-create-update" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.758349 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a758f3-9e27-4c1b-917e-c4a16c5f3342" containerName="mariadb-account-create-update" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.758523 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8a758f3-9e27-4c1b-917e-c4a16c5f3342" containerName="mariadb-account-create-update" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.759284 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.771143 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-10e4-account-create-update-vpspz"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.772614 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.774615 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.784246 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-44zpx"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.792612 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-10e4-account-create-update-vpspz"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.861327 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-wfhkk"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.862455 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.878243 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-wfhkk"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.887127 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-8c4f-account-create-update-jb2q4"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.888125 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tclzp\" (UniqueName: \"kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp\") pod \"barbican-10e4-account-create-update-vpspz\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.888182 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.888216 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stpqs\" (UniqueName: \"kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.888266 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts\") pod \"barbican-10e4-account-create-update-vpspz\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.888611 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.893003 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.917462 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-8c4f-account-create-update-jb2q4"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.956234 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-th5wh"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.957333 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.962408 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.984026 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-th5wh"] Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.992718 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts\") pod \"barbican-10e4-account-create-update-vpspz\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.992863 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.992953 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9jzs\" (UniqueName: \"kubernetes.io/projected/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-kube-api-access-l9jzs\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.992974 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.993003 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm8vr\" (UniqueName: \"kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.993089 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tclzp\" (UniqueName: \"kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp\") pod \"barbican-10e4-account-create-update-vpspz\" 
(UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.993127 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.993161 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stpqs\" (UniqueName: \"kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.994148 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts\") pod \"barbican-10e4-account-create-update-vpspz\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:28 crc kubenswrapper[5021]: I0121 15:43:28.995564 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.020726 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tclzp\" (UniqueName: \"kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp\") pod \"barbican-10e4-account-create-update-vpspz\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") " pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.024502 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stpqs\" (UniqueName: \"kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs\") pod \"cinder-db-create-44zpx\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") " pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.053585 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-xtcpm"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.055064 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.059520 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.060013 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.060198 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.060379 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8kl7s" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.076379 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xtcpm"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.084880 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-44zpx" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.094087 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-vpspz" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095096 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cnz6\" (UniqueName: \"kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095403 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095526 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095614 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095660 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9jzs\" (UniqueName: \"kubernetes.io/projected/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-kube-api-access-l9jzs\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.095699 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm8vr\" (UniqueName: 
\"kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.096738 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.103570 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.117060 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm8vr\" (UniqueName: \"kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr\") pod \"barbican-db-create-wfhkk\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") " pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.124566 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9jzs\" (UniqueName: \"kubernetes.io/projected/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-kube-api-access-l9jzs\") pod \"cinder-8c4f-account-create-update-jb2q4\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") " pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.147023 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-r4jkd"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.166963 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.177601 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-r4jkd"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.192435 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-wfhkk" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.197360 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.197421 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.197610 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cnz6\" (UniqueName: \"kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.197633 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.197675 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjmph\" (UniqueName: \"kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.199147 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.223522 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c4f-account-create-update-jb2q4" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.238184 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cnz6\" (UniqueName: \"kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6\") pod \"root-account-create-update-th5wh\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") " pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.285344 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-th5wh" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.298958 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjmph\" (UniqueName: \"kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.299028 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.299064 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.299104 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.299222 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwx4z\" (UniqueName: \"kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.323521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.324110 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.328762 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjmph\" (UniqueName: \"kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph\") pod \"keystone-db-sync-xtcpm\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") " pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.361456 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-a8a5-account-create-update-x4n5x"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.363583 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.370701 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.375958 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a8a5-account-create-update-x4n5x"] Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.401369 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwx4z\" (UniqueName: \"kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.401494 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.402300 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.419847 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwx4z\" (UniqueName: \"kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z\") pod \"neutron-db-create-r4jkd\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") " pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.420256 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xtcpm" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.502707 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp6tt\" (UniqueName: \"kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.503228 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.503526 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-r4jkd" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.605210 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.605310 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp6tt\" (UniqueName: \"kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.606348 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.625986 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp6tt\" (UniqueName: \"kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt\") pod \"neutron-a8a5-account-create-update-x4n5x\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") " pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:29 crc kubenswrapper[5021]: I0121 15:43:29.687420 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-a8a5-account-create-update-x4n5x" Jan 21 15:43:40 crc kubenswrapper[5021]: E0121 15:43:40.345654 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Jan 21 15:43:40 crc kubenswrapper[5021]: E0121 15:43:40.346426 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m8sv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-8jhv7_openstack(c39660b0-c6d5-4b6e-95b8-12b8fbf38a14): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 21 15:43:40 crc kubenswrapper[5021]: E0121 15:43:40.348297 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-8jhv7" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" Jan 21 15:43:40 crc kubenswrapper[5021]: E0121 15:43:40.750175 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-8jhv7" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" Jan 21 15:43:40 crc kubenswrapper[5021]: I0121 15:43:40.787220 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-8c4f-account-create-update-jb2q4"] Jan 21 15:43:40 crc kubenswrapper[5021]: W0121 15:43:40.799180 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03d8cc6b_a5fc_4f8c_9b94_a3d54114278f.slice/crio-5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007 WatchSource:0}: Error finding container 5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007: Status 404 returned error can't find the container with id 5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007 Jan 21 15:43:40 crc kubenswrapper[5021]: I0121 15:43:40.934146 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-wfhkk"] Jan 21 15:43:40 crc kubenswrapper[5021]: W0121 15:43:40.938182 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f2d2566_dcbf_437d_bf8b_32d6a49b34aa.slice/crio-18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a WatchSource:0}: Error finding container 18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a: Status 404 returned error can't find the container with id 18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a Jan 21 15:43:40 crc kubenswrapper[5021]: W0121 15:43:40.939575 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod624bb493_3f8a_4a62_993b_f66ccc317cc9.slice/crio-53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff WatchSource:0}: Error finding container 53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff: Status 404 returned error can't find the container with id 53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff Jan 21 15:43:40 crc kubenswrapper[5021]: I0121 15:43:40.943336 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-r4jkd"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.014066 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a8a5-account-create-update-x4n5x"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.021091 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-xtcpm"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.041197 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-10e4-account-create-update-vpspz"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.110612 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-th5wh"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.117952 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-44zpx"] Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.755964 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c4f-account-create-update-jb2q4" event={"ID":"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f","Type":"ContainerStarted","Data":"aac21446272396c3ae28db92c60ba6f45b63f13ffdfc027e64f67eb5ae638216"} Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.756349 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c4f-account-create-update-jb2q4" event={"ID":"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f","Type":"ContainerStarted","Data":"5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007"} Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.758642 
Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.758676 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wfhkk" event={"ID":"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa","Type":"ContainerStarted","Data":"18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a"}
Jan 21 15:43:41 crc kubenswrapper[5021]: I0121 15:43:41.759656 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r4jkd" event={"ID":"624bb493-3f8a-4a62-993b-f66ccc317cc9","Type":"ContainerStarted","Data":"53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff"}
Jan 21 15:43:41 crc kubenswrapper[5021]: W0121 15:43:41.884081 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod744bc068_76e3_4357_8794_a9c58add89c9.slice/crio-54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6 WatchSource:0}: Error finding container 54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6: Status 404 returned error can't find the container with id 54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6
Jan 21 15:43:41 crc kubenswrapper[5021]: W0121 15:43:41.892159 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda995ed3_1cf2_4f8e_ba7f_9780821e31f3.slice/crio-f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291 WatchSource:0}: Error finding container f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291: Status 404 returned error can't find the container with id f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291
Jan 21 15:43:41 crc kubenswrapper[5021]: W0121 15:43:41.895800 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod085747f2_d183_4dc9_89dc_91a732a1d6b0.slice/crio-8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140 WatchSource:0}: Error finding container 8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140: Status 404 returned error can't find the container with id 8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.356677 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.356731 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.784449 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xtcpm" event={"ID":"9984d786-ae3e-4cfe-8bf6-099159dada65","Type":"ContainerStarted","Data":"f28ddf1e604d407678108f86c506c7a482d69f19116d88e07e9c4402ce32263a"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.787483 5021 generic.go:334] "Generic (PLEG): container finished" podID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" containerID="aac21446272396c3ae28db92c60ba6f45b63f13ffdfc027e64f67eb5ae638216" exitCode=0
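
The machine-config-daemon liveness failure a few records above is a plain connection-refused against the probe endpoint. An HTTP liveness probe reduces to a time-bounded GET where a dial error or a bad status counts as failure; a minimal sketch, with the URL taken from the log record and the 1s timeout assumed rather than read from the pod spec:

    // probe.go - a minimal sketch of what the failed liveness probe
    // above amounts to: a time-bounded HTTP GET where a dial error
    // (here "connection refused") or a bad status code is a failure.
    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            fmt.Printf("probe failed: %v\n", err) // e.g. connect: connection refused
            return
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("probe succeeded:", resp.Status)
        } else {
            fmt.Println("probe failed:", resp.Status)
        }
    }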
15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.787483 5021 generic.go:334] "Generic (PLEG): container finished" podID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" containerID="aac21446272396c3ae28db92c60ba6f45b63f13ffdfc027e64f67eb5ae638216" exitCode=0
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.787562 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c4f-account-create-update-jb2q4" event={"ID":"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f","Type":"ContainerDied","Data":"aac21446272396c3ae28db92c60ba6f45b63f13ffdfc027e64f67eb5ae638216"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.789216 5021 generic.go:334] "Generic (PLEG): container finished" podID="624bb493-3f8a-4a62-993b-f66ccc317cc9" containerID="6993e642b208ca097fabad301b80dadf17523b826c5d300e040f520c7609d465" exitCode=0
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.789275 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r4jkd" event={"ID":"624bb493-3f8a-4a62-993b-f66ccc317cc9","Type":"ContainerDied","Data":"6993e642b208ca097fabad301b80dadf17523b826c5d300e040f520c7609d465"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.791168 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a8a5-account-create-update-x4n5x" event={"ID":"503da4ea-7c79-4bfe-b37b-d4db888b76f4","Type":"ContainerStarted","Data":"c45563f373ab91b4ce46332e301191ed228473ee66b6a53b2393f1dfc4a4b932"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.791193 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a8a5-account-create-update-x4n5x" event={"ID":"503da4ea-7c79-4bfe-b37b-d4db888b76f4","Type":"ContainerStarted","Data":"c1f359e472ae7b9d6a30cfe8b0e9fd3ba1f8bb062905fb755e14115937159a08"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.794541 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-vpspz" event={"ID":"da995ed3-1cf2-4f8e-ba7f-9780821e31f3","Type":"ContainerStarted","Data":"cc610cc760d0bea04b57dbf46669247b99b993ae8d42b6b8d4657eee8f75076e"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.794571 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-vpspz" event={"ID":"da995ed3-1cf2-4f8e-ba7f-9780821e31f3","Type":"ContainerStarted","Data":"f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.796283 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-th5wh" event={"ID":"744bc068-76e3-4357-8794-a9c58add89c9","Type":"ContainerStarted","Data":"1b8f2c8cd08e817b6588454ef88ddaf1746ece06c08ba02b29cbe9d2437827d0"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.796313 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-th5wh" event={"ID":"744bc068-76e3-4357-8794-a9c58add89c9","Type":"ContainerStarted","Data":"54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.802171 5021 generic.go:334] "Generic (PLEG): container finished" podID="7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" containerID="57118c6cf18fe5a886fca20a8b4b0c89ee968b833cf0660219fa9f27ca939db1" exitCode=0
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.803973 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wfhkk" event={"ID":"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa","Type":"ContainerDied","Data":"57118c6cf18fe5a886fca20a8b4b0c89ee968b833cf0660219fa9f27ca939db1"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.807056 5021 generic.go:334] "Generic (PLEG): container finished" podID="085747f2-d183-4dc9-89dc-91a732a1d6b0" containerID="594ea29889407f9a55293be2914d237093beac154f797e7c4a55390c7e75fbb3" exitCode=0
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.807205 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-44zpx" event={"ID":"085747f2-d183-4dc9-89dc-91a732a1d6b0","Type":"ContainerDied","Data":"594ea29889407f9a55293be2914d237093beac154f797e7c4a55390c7e75fbb3"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.807234 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-44zpx" event={"ID":"085747f2-d183-4dc9-89dc-91a732a1d6b0","Type":"ContainerStarted","Data":"8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.828489 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-th5wh" podStartSLOduration=14.828461346 podStartE2EDuration="14.828461346s" podCreationTimestamp="2026-01-21 15:43:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:42.81835034 +0000 UTC m=+1164.353464239" watchObservedRunningTime="2026-01-21 15:43:42.828461346 +0000 UTC m=+1164.363575235"
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.833889 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"70645c7bc51255ddd66eef76b13c0c8daa2f66c30285a645c871c40c0117099a"}
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.841869 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-10e4-account-create-update-vpspz" podStartSLOduration=14.841844253 podStartE2EDuration="14.841844253s" podCreationTimestamp="2026-01-21 15:43:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:42.833611128 +0000 UTC m=+1164.368725017" watchObservedRunningTime="2026-01-21 15:43:42.841844253 +0000 UTC m=+1164.376958142"
Jan 21 15:43:42 crc kubenswrapper[5021]: I0121 15:43:42.865091 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-a8a5-account-create-update-x4n5x" podStartSLOduration=13.865073579 podStartE2EDuration="13.865073579s" podCreationTimestamp="2026-01-21 15:43:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:43:42.859315931 +0000 UTC m=+1164.394429820" watchObservedRunningTime="2026-01-21 15:43:42.865073579 +0000 UTC m=+1164.400187468"
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.845989 5021 generic.go:334] "Generic (PLEG): container finished" podID="744bc068-76e3-4357-8794-a9c58add89c9" containerID="1b8f2c8cd08e817b6588454ef88ddaf1746ece06c08ba02b29cbe9d2437827d0" exitCode=0
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.846076 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-th5wh" event={"ID":"744bc068-76e3-4357-8794-a9c58add89c9","Type":"ContainerDied","Data":"1b8f2c8cd08e817b6588454ef88ddaf1746ece06c08ba02b29cbe9d2437827d0"}
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.849617 5021 generic.go:334] "Generic (PLEG): container finished" podID="503da4ea-7c79-4bfe-b37b-d4db888b76f4" containerID="c45563f373ab91b4ce46332e301191ed228473ee66b6a53b2393f1dfc4a4b932" exitCode=0
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.849671 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a8a5-account-create-update-x4n5x" event={"ID":"503da4ea-7c79-4bfe-b37b-d4db888b76f4","Type":"ContainerDied","Data":"c45563f373ab91b4ce46332e301191ed228473ee66b6a53b2393f1dfc4a4b932"}
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.856373 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"286ee38dedf5ca3a893d36e49ab99761202c13f3e2d7786385e279604c029ca3"}
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.856427 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"a89b180c0135b475ed1ba2315e698962a948ee0a359d0c123f97f5bef6cca782"}
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.882445 5021 generic.go:334] "Generic (PLEG): container finished" podID="da995ed3-1cf2-4f8e-ba7f-9780821e31f3" containerID="cc610cc760d0bea04b57dbf46669247b99b993ae8d42b6b8d4657eee8f75076e" exitCode=0
Jan 21 15:43:43 crc kubenswrapper[5021]: I0121 15:43:43.882658 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-vpspz" event={"ID":"da995ed3-1cf2-4f8e-ba7f-9780821e31f3","Type":"ContainerDied","Data":"cc610cc760d0bea04b57dbf46669247b99b993ae8d42b6b8d4657eee8f75076e"}
Jan 21 15:43:58 crc kubenswrapper[5021]: E0121 15:43:58.775991 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-keystone:current-podified"
Jan 21 15:43:58 crc kubenswrapper[5021]: E0121 15:43:58.776774 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:keystone-db-sync,Image:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,Command:[/bin/bash],Args:[-c keystone-manage db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/keystone/keystone.conf,SubPath:keystone.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tjmph,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42425,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42425,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-db-sync-xtcpm_openstack(9984d786-ae3e-4cfe-8bf6-099159dada65): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:43:58 crc kubenswrapper[5021]: E0121 15:43:58.778714 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"keystone-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/keystone-db-sync-xtcpm" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65"
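The three entries above show one failed image pull surfacing at three layers: the CRI-level pull error (log.go:32), the container-start failure with the full container spec dumped (kuberuntime_manager.go:1274), and the pod worker abandoning that sync attempt with ErrImagePull (pod_workers.go:1301); the retry later surfaces as ImagePullBackOff at 15:43:59.309393 below. A rough sketch for tallying such failures per image across a log (the line shape is assumed from these entries; this is not a kubelet API):

// pull_failures.go - count ErrImagePull / ImagePullBackOff lines per image.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// image="..." appears on the CRI pull-error line; backoff lines may not carry it.
var imageAttr = regexp.MustCompile(`image="([^"]+)"`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		line := sc.Text()
		for _, kind := range []string{"ErrImagePull", "ImagePullBackOff"} {
			if !strings.Contains(line, kind) {
				continue
			}
			img := "(image not in line)"
			if m := imageAttr.FindStringSubmatch(line); m != nil {
				img = m[1]
			}
			counts[kind+" "+img]++
		}
	}
	for k, n := range counts {
		fmt.Println(n, k)
	}
}

On this section it would attribute both failures to the openstack-keystone:current-podified image.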
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.916590 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-vpspz"
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.925855 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-44zpx"
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.953504 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts\") pod \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") "
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.953653 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tclzp\" (UniqueName: \"kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp\") pod \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\" (UID: \"da995ed3-1cf2-4f8e-ba7f-9780821e31f3\") "
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.954374 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "da995ed3-1cf2-4f8e-ba7f-9780821e31f3" (UID: "da995ed3-1cf2-4f8e-ba7f-9780821e31f3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.964212 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp" (OuterVolumeSpecName: "kube-api-access-tclzp") pod "da995ed3-1cf2-4f8e-ba7f-9780821e31f3" (UID: "da995ed3-1cf2-4f8e-ba7f-9780821e31f3"). InnerVolumeSpecName "kube-api-access-tclzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:43:58 crc kubenswrapper[5021]: I0121 15:43:58.967555 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a8a5-account-create-update-x4n5x"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.005810 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r4jkd"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.024446 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-wfhkk" event={"ID":"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa","Type":"ContainerDied","Data":"18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.024486 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18fda36c96301540a1017855690feab5faf0c1a9f2a6dee38af90bd32940076a"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.025892 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c4f-account-create-update-jb2q4"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.028104 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r4jkd"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.028089 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r4jkd" event={"ID":"624bb493-3f8a-4a62-993b-f66ccc317cc9","Type":"ContainerDied","Data":"53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.028243 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53d4b0bb5cf7b6b3306676be5886c7b73a1fea9fc63456776e882a878f56a1ff"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.029801 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-wfhkk"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.030430 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a8a5-account-create-update-x4n5x" event={"ID":"503da4ea-7c79-4bfe-b37b-d4db888b76f4","Type":"ContainerDied","Data":"c1f359e472ae7b9d6a30cfe8b0e9fd3ba1f8bb062905fb755e14115937159a08"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.030461 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1f359e472ae7b9d6a30cfe8b0e9fd3ba1f8bb062905fb755e14115937159a08"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.030510 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a8a5-account-create-update-x4n5x"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.037247 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-44zpx"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.037266 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-44zpx" event={"ID":"085747f2-d183-4dc9-89dc-91a732a1d6b0","Type":"ContainerDied","Data":"8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.037304 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8937e60dd705e9dc258072ce8e3af3f567bea4664db221b3beeb07b61aafa140"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.039103 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-th5wh"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058303 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-vpspz" event={"ID":"da995ed3-1cf2-4f8e-ba7f-9780821e31f3","Type":"ContainerDied","Data":"f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058345 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8ae4c35dce168fc701bd172b1428fe37b02e26490a9bdb7c72aa6d4b7a79291"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058413 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-vpspz"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058567 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts\") pod \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058709 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stpqs\" (UniqueName: \"kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs\") pod \"085747f2-d183-4dc9-89dc-91a732a1d6b0\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058740 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp6tt\" (UniqueName: \"kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt\") pod \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\" (UID: \"503da4ea-7c79-4bfe-b37b-d4db888b76f4\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058825 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwx4z\" (UniqueName: \"kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z\") pod \"624bb493-3f8a-4a62-993b-f66ccc317cc9\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058900 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts\") pod \"624bb493-3f8a-4a62-993b-f66ccc317cc9\" (UID: \"624bb493-3f8a-4a62-993b-f66ccc317cc9\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.058962 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts\") pod \"085747f2-d183-4dc9-89dc-91a732a1d6b0\" (UID: \"085747f2-d183-4dc9-89dc-91a732a1d6b0\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.059154 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "503da4ea-7c79-4bfe-b37b-d4db888b76f4" (UID: "503da4ea-7c79-4bfe-b37b-d4db888b76f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.059441 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.059467 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tclzp\" (UniqueName: \"kubernetes.io/projected/da995ed3-1cf2-4f8e-ba7f-9780821e31f3-kube-api-access-tclzp\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.059481 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/503da4ea-7c79-4bfe-b37b-d4db888b76f4-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.062750 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "085747f2-d183-4dc9-89dc-91a732a1d6b0" (UID: "085747f2-d183-4dc9-89dc-91a732a1d6b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.063351 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "624bb493-3f8a-4a62-993b-f66ccc317cc9" (UID: "624bb493-3f8a-4a62-993b-f66ccc317cc9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.063899 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-8c4f-account-create-update-jb2q4" event={"ID":"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f","Type":"ContainerDied","Data":"5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.063957 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f43382852aa165200504e3d6fa71b19180b5820be6e754b1711ac55cd038007"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.064044 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-8c4f-account-create-update-jb2q4"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.068149 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z" (OuterVolumeSpecName: "kube-api-access-jwx4z") pod "624bb493-3f8a-4a62-993b-f66ccc317cc9" (UID: "624bb493-3f8a-4a62-993b-f66ccc317cc9"). InnerVolumeSpecName "kube-api-access-jwx4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.068276 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt" (OuterVolumeSpecName: "kube-api-access-tp6tt") pod "503da4ea-7c79-4bfe-b37b-d4db888b76f4" (UID: "503da4ea-7c79-4bfe-b37b-d4db888b76f4"). InnerVolumeSpecName "kube-api-access-tp6tt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.069702 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs" (OuterVolumeSpecName: "kube-api-access-stpqs") pod "085747f2-d183-4dc9-89dc-91a732a1d6b0" (UID: "085747f2-d183-4dc9-89dc-91a732a1d6b0"). InnerVolumeSpecName "kube-api-access-stpqs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.085018 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-th5wh"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.085162 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-th5wh" event={"ID":"744bc068-76e3-4357-8794-a9c58add89c9","Type":"ContainerDied","Data":"54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6"}
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.085752 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54b487c4156e4985da3994d6098ecb83dc7ad18888cb8f4f631cd21fa98635a6"
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.161790 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts\") pod \"744bc068-76e3-4357-8794-a9c58add89c9\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.161872 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9jzs\" (UniqueName: \"kubernetes.io/projected/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-kube-api-access-l9jzs\") pod \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.161919 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts\") pod \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\" (UID: \"03d8cc6b-a5fc-4f8c-9b94-a3d54114278f\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.161958 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cnz6\" (UniqueName: \"kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6\") pod \"744bc068-76e3-4357-8794-a9c58add89c9\" (UID: \"744bc068-76e3-4357-8794-a9c58add89c9\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.161996 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts\") pod \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.162036 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm8vr\" (UniqueName: \"kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr\") pod \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\" (UID: \"7f2d2566-dcbf-437d-bf8b-32d6a49b34aa\") "
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.162854 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "744bc068-76e3-4357-8794-a9c58add89c9" (UID: "744bc068-76e3-4357-8794-a9c58add89c9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163016 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwx4z\" (UniqueName: \"kubernetes.io/projected/624bb493-3f8a-4a62-993b-f66ccc317cc9-kube-api-access-jwx4z\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163044 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624bb493-3f8a-4a62-993b-f66ccc317cc9-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163058 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/085747f2-d183-4dc9-89dc-91a732a1d6b0-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163071 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/744bc068-76e3-4357-8794-a9c58add89c9-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163085 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stpqs\" (UniqueName: \"kubernetes.io/projected/085747f2-d183-4dc9-89dc-91a732a1d6b0-kube-api-access-stpqs\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163098 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp6tt\" (UniqueName: \"kubernetes.io/projected/503da4ea-7c79-4bfe-b37b-d4db888b76f4-kube-api-access-tp6tt\") on node \"crc\" DevicePath \"\""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.163344 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" (UID: "7f2d2566-dcbf-437d-bf8b-32d6a49b34aa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.164342 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" (UID: "03d8cc6b-a5fc-4f8c-9b94-a3d54114278f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.188758 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr" (OuterVolumeSpecName: "kube-api-access-wm8vr") pod "7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" (UID: "7f2d2566-dcbf-437d-bf8b-32d6a49b34aa"). InnerVolumeSpecName "kube-api-access-wm8vr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.188858 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6" (OuterVolumeSpecName: "kube-api-access-7cnz6") pod "744bc068-76e3-4357-8794-a9c58add89c9" (UID: "744bc068-76e3-4357-8794-a9c58add89c9"). InnerVolumeSpecName "kube-api-access-7cnz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.264149 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9jzs\" (UniqueName: \"kubernetes.io/projected/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-kube-api-access-l9jzs\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.264181 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.264194 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cnz6\" (UniqueName: \"kubernetes.io/projected/744bc068-76e3-4357-8794-a9c58add89c9-kube-api-access-7cnz6\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.264208 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:59 crc kubenswrapper[5021]: I0121 15:43:59.264221 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm8vr\" (UniqueName: \"kubernetes.io/projected/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa-kube-api-access-wm8vr\") on node \"crc\" DevicePath \"\"" Jan 21 15:43:59 crc kubenswrapper[5021]: E0121 15:43:59.309393 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"keystone-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-keystone:current-podified\\\"\"" pod="openstack/keystone-db-sync-xtcpm" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65" Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.092558 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-8jhv7" event={"ID":"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14","Type":"ContainerStarted","Data":"d9a507933f6db91750f321b78dd88b1f0d89e634d83ca2dd3c6ff7e83051f0c9"} Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.101029 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-wfhkk" Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.108574 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"1177abe509b1fc7d36535c70f37ab796f728a73afa2630b7247a37b263d96673"} Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.108644 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"d0d02b0697c6f0cdbe32b4c15779fa2b7fb9db8ad0d4beee7917a8570d9ce131"} Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.108663 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"0b98874dda34c3adb9708dfa4fddca97d42d24280001e6ca51c29fdf4e04e366"} Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.108674 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerStarted","Data":"21f76b84c77562932f1ebb5a263ddfe5a755ae6258ad955ca59a13307d229d84"} Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.122622 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-8jhv7" podStartSLOduration=2.001232168 podStartE2EDuration="38.12260402s" podCreationTimestamp="2026-01-21 15:43:22 +0000 UTC" firstStartedPulling="2026-01-21 15:43:23.194249677 +0000 UTC m=+1144.729363566" lastFinishedPulling="2026-01-21 15:43:59.315621529 +0000 UTC m=+1180.850735418" observedRunningTime="2026-01-21 15:44:00.108653825 +0000 UTC m=+1181.643767714" watchObservedRunningTime="2026-01-21 15:44:00.12260402 +0000 UTC m=+1181.657717909" Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.308586 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-th5wh"] Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.315382 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-th5wh"] Jan 21 15:44:00 crc kubenswrapper[5021]: I0121 15:44:00.750364 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="744bc068-76e3-4357-8794-a9c58add89c9" path="/var/lib/kubelet/pods/744bc068-76e3-4357-8794-a9c58add89c9/volumes" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.170443 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=54.744626234 podStartE2EDuration="1m18.170425765s" podCreationTimestamp="2026-01-21 15:42:43 +0000 UTC" firstStartedPulling="2026-01-21 15:43:18.843331246 +0000 UTC m=+1140.378445135" lastFinishedPulling="2026-01-21 15:43:42.269130777 +0000 UTC m=+1163.804244666" observedRunningTime="2026-01-21 15:44:01.16352203 +0000 UTC m=+1182.698635929" watchObservedRunningTime="2026-01-21 15:44:01.170425765 +0000 UTC m=+1182.705539654" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.447884 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448315 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da995ed3-1cf2-4f8e-ba7f-9780821e31f3" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448340 5021 
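The glance-db-sync-8jhv7 startup entry above is worth decoding: podStartE2EDuration is 38.12s, but podStartSLOduration is only 2.00s, and the difference is exactly the image pull window, m=+1180.850735418 minus m=+1144.729363566 = 36.121371852s, so the SLO figure appears to exclude time spent pulling images. A small sketch of that arithmetic using the monotonic m=+ offsets (the regexp and the hard-coded line fragment are illustrative, copied from the entry above):

// slo_duration.go - check podStartSLOduration = E2E duration minus pull time.
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var mono = regexp.MustCompile(`(firstStartedPulling|lastFinishedPulling)="[^"]*m=\+([0-9.]+)"`)

func main() {
	// Values copied from the glance-db-sync-8jhv7 entry above.
	line := `firstStartedPulling="2026-01-21 15:43:23.194249677 +0000 UTC m=+1144.729363566" lastFinishedPulling="2026-01-21 15:43:59.315621529 +0000 UTC m=+1180.850735418"`
	ts := map[string]float64{}
	for _, m := range mono.FindAllStringSubmatch(line, -1) {
		v, _ := strconv.ParseFloat(m[2], 64)
		ts[m[1]] = v
	}
	pull := ts["lastFinishedPulling"] - ts["firstStartedPulling"]
	e2e := 38.12260402 // podStartE2EDuration from the same entry
	fmt.Printf("pulling: %.9fs, E2E minus pulling: %.9fs\n", pull, e2e-pull)
	// Prints 36.121371852s and 2.001232168s, matching podStartSLOduration.
}

The same relation holds for swift-storage-0 above (1m18.17s E2E, roughly 23.43s of pulling, 54.74s SLO); pods whose pull timestamps are the zero time 0001-01-01 simply report SLO equal to E2E.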
state_mem.go:107] "Deleted CPUSet assignment" podUID="da995ed3-1cf2-4f8e-ba7f-9780821e31f3" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448354 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="503da4ea-7c79-4bfe-b37b-d4db888b76f4" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448365 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="503da4ea-7c79-4bfe-b37b-d4db888b76f4" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448382 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744bc068-76e3-4357-8794-a9c58add89c9" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448390 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="744bc068-76e3-4357-8794-a9c58add89c9" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448405 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448415 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448438 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448509 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448535 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="085747f2-d183-4dc9-89dc-91a732a1d6b0" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448545 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="085747f2-d183-4dc9-89dc-91a732a1d6b0" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: E0121 15:44:01.448563 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="624bb493-3f8a-4a62-993b-f66ccc317cc9" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448572 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="624bb493-3f8a-4a62-993b-f66ccc317cc9" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448753 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="da995ed3-1cf2-4f8e-ba7f-9780821e31f3" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448773 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="085747f2-d183-4dc9-89dc-91a732a1d6b0" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448783 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="503da4ea-7c79-4bfe-b37b-d4db888b76f4" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448796 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="624bb493-3f8a-4a62-993b-f66ccc317cc9" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 
15:44:01.448806 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448821 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" containerName="mariadb-database-create" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.448838 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="744bc068-76e3-4357-8794-a9c58add89c9" containerName="mariadb-account-create-update" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.449884 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.452702 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.457288 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.501997 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.502056 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.502126 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.502221 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.502248 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx7lg\" (UniqueName: \"kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.502290 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" 
Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603557 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603645 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603687 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603776 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603876 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.603918 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx7lg\" (UniqueName: \"kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.604784 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.604807 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.604839 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.605074 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.605654 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.629085 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx7lg\" (UniqueName: \"kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg\") pod \"dnsmasq-dns-6d5b6d6b67-kprj2\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:01 crc kubenswrapper[5021]: I0121 15:44:01.765275 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:02 crc kubenswrapper[5021]: I0121 15:44:02.193337 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:02 crc kubenswrapper[5021]: W0121 15:44:02.195096 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6722dc85_c75c_4751_b0c9_1a7ffc45afaa.slice/crio-4738c4c592d9f065ef6f69c26fe3308b6908e1cda1ada00a18599d5294dd65aa WatchSource:0}: Error finding container 4738c4c592d9f065ef6f69c26fe3308b6908e1cda1ada00a18599d5294dd65aa: Status 404 returned error can't find the container with id 4738c4c592d9f065ef6f69c26fe3308b6908e1cda1ada00a18599d5294dd65aa Jan 21 15:44:03 crc kubenswrapper[5021]: I0121 15:44:03.142709 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerStarted","Data":"4738c4c592d9f065ef6f69c26fe3308b6908e1cda1ada00a18599d5294dd65aa"} Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.098418 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zlg86"] Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.100852 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.103081 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.120092 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zlg86"] Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.151608 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerStarted","Data":"c3966764226e28e67ed97336fca66009bbc265e4b7084b130fa49364be513ede"} Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.171099 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.171460 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjmkx\" (UniqueName: \"kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.273455 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjmkx\" (UniqueName: \"kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.273577 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.274796 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.296438 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjmkx\" (UniqueName: \"kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx\") pod \"root-account-create-update-zlg86\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") " pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:04 crc kubenswrapper[5021]: I0121 15:44:04.423707 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zlg86" Jan 21 15:44:05 crc kubenswrapper[5021]: I0121 15:44:05.161046 5021 generic.go:334] "Generic (PLEG): container finished" podID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerID="c3966764226e28e67ed97336fca66009bbc265e4b7084b130fa49364be513ede" exitCode=0 Jan 21 15:44:05 crc kubenswrapper[5021]: I0121 15:44:05.161185 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerDied","Data":"c3966764226e28e67ed97336fca66009bbc265e4b7084b130fa49364be513ede"} Jan 21 15:44:05 crc kubenswrapper[5021]: W0121 15:44:05.612417 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b7ebd6e_4ae4_4e24_b15f_b149b88ab987.slice/crio-e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc WatchSource:0}: Error finding container e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc: Status 404 returned error can't find the container with id e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc Jan 21 15:44:05 crc kubenswrapper[5021]: I0121 15:44:05.614545 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zlg86"] Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.170548 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerStarted","Data":"93f595b09f88348355e1e3d069b9cbd62a7fe5a5d5da20d2e57498fab1e77609"} Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.170992 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.172006 5021 generic.go:334] "Generic (PLEG): container finished" podID="4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" containerID="31eb86a8dd09e080838036bb26c536e93153e4ab7d25d69884f6816ae53e5ab2" exitCode=0 Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.172177 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlg86" event={"ID":"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987","Type":"ContainerDied","Data":"31eb86a8dd09e080838036bb26c536e93153e4ab7d25d69884f6816ae53e5ab2"} Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.172284 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlg86" event={"ID":"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987","Type":"ContainerStarted","Data":"e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc"} Jan 21 15:44:06 crc kubenswrapper[5021]: I0121 15:44:06.192989 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" podStartSLOduration=5.192971259 podStartE2EDuration="5.192971259s" podCreationTimestamp="2026-01-21 15:44:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:06.18781699 +0000 UTC m=+1187.722930879" watchObservedRunningTime="2026-01-21 15:44:06.192971259 +0000 UTC m=+1187.728085148" Jan 21 15:44:07 crc kubenswrapper[5021]: I0121 15:44:07.863233 5021 util.go:48] "No ready sandbox for pod can be found. 
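Read as a sequence, the dnsmasq-dns-6d5b6d6b67-kprj2 events above form a clean timeline: sandbox 4738c4c5... starts, container c3966764... runs and exits 0 (consistent with an init container completing), then 93f595b0... starts and the readiness probe begins reporting. A sketch that rebuilds such per-pod timelines from the PLEG event lines (the regexp assumes the event={...} shape above; ordering follows log order, which can interleave out of wall-clock order across goroutines):

// pod_timeline.go - group PLEG ContainerStarted/ContainerDied events per pod.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var plegEvent = regexp.MustCompile(`event for pod" pod="([^"]+)" event=\{"ID":"[^"]+","Type":"(Container\w+)","Data":"([0-9a-f]+)"\}`)

func main() {
	order := []string{}
	timeline := map[string][]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		m := plegEvent.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		if _, seen := timeline[m[1]]; !seen {
			order = append(order, m[1]) // remember first-seen order of pods
		}
		timeline[m[1]] = append(timeline[m[1]], fmt.Sprintf("%-16s %.12s", m[2], m[3]))
	}
	for _, pod := range order {
		fmt.Println(pod)
		for _, ev := range timeline[pod] {
			fmt.Println("   " + ev)
		}
	}
}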
Jan 21 15:44:07 crc kubenswrapper[5021]: I0121 15:44:07.863233 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zlg86"
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.051778 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjmkx\" (UniqueName: \"kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx\") pod \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") "
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.052572 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts\") pod \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\" (UID: \"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987\") "
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.053779 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" (UID: "4b7ebd6e-4ae4-4e24-b15f-b149b88ab987"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.062635 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx" (OuterVolumeSpecName: "kube-api-access-tjmkx") pod "4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" (UID: "4b7ebd6e-4ae4-4e24-b15f-b149b88ab987"). InnerVolumeSpecName "kube-api-access-tjmkx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.154063 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjmkx\" (UniqueName: \"kubernetes.io/projected/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-kube-api-access-tjmkx\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.154097 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.186334 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlg86" event={"ID":"4b7ebd6e-4ae4-4e24-b15f-b149b88ab987","Type":"ContainerDied","Data":"e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc"}
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.186373 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e293e388990238b2cb9b8501463f0dc6c028255f98e26b078ed060267f5d78cc"
Jan 21 15:44:08 crc kubenswrapper[5021]: I0121 15:44:08.186430 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zlg86"
Jan 21 15:44:10 crc kubenswrapper[5021]: I0121 15:44:10.327538 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zlg86"]
Jan 21 15:44:10 crc kubenswrapper[5021]: I0121 15:44:10.336733 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zlg86"]
Jan 21 15:44:10 crc kubenswrapper[5021]: I0121 15:44:10.748372 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" path="/var/lib/kubelet/pods/4b7ebd6e-4ae4-4e24-b15f-b149b88ab987/volumes"
Jan 21 15:44:11 crc kubenswrapper[5021]: I0121 15:44:11.207336 5021 generic.go:334] "Generic (PLEG): container finished" podID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" containerID="d9a507933f6db91750f321b78dd88b1f0d89e634d83ca2dd3c6ff7e83051f0c9" exitCode=0
Jan 21 15:44:11 crc kubenswrapper[5021]: I0121 15:44:11.207389 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-8jhv7" event={"ID":"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14","Type":"ContainerDied","Data":"d9a507933f6db91750f321b78dd88b1f0d89e634d83ca2dd3c6ff7e83051f0c9"}
Jan 21 15:44:11 crc kubenswrapper[5021]: I0121 15:44:11.767155 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2"
Jan 21 15:44:11 crc kubenswrapper[5021]: I0121 15:44:11.842167 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"]
Jan 21 15:44:11 crc kubenswrapper[5021]: I0121 15:44:11.842462 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="dnsmasq-dns" containerID="cri-o://f379b38122b79af8352111451a3c240ec9ee109be0b0e1e000c38625d2933c70" gracePeriod=10
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.229879 5021 generic.go:334] "Generic (PLEG): container finished" podID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerID="f379b38122b79af8352111451a3c240ec9ee109be0b0e1e000c38625d2933c70" exitCode=0
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.229969 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerDied","Data":"f379b38122b79af8352111451a3c240ec9ee109be0b0e1e000c38625d2933c70"}
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.230310 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6" event={"ID":"bc460a0a-094d-4c9d-882f-3bd263f6a4c3","Type":"ContainerDied","Data":"030cd3aad1e5e6e8c5803da74c5fba479a88a767b135c8d05eda00b80a1fb5d6"}
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.230331 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="030cd3aad1e5e6e8c5803da74c5fba479a88a767b135c8d05eda00b80a1fb5d6"
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.303322 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-hmcx6"
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.357195 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.357254 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.357298 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz"
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.357973 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.358031 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c" gracePeriod=600
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.426692 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config\") pod \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") "
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.426804 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc\") pod \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") "
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.426843 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw2ms\" (UniqueName: \"kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms\") pod \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") "
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.426923 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb\") pod \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") "
Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.427028 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb\") pod \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\" (UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") "
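Two shutdown paths are interleaved above: the superseded dnsmasq-dns-b8fbc5445-hmcx6 pod is deleted via the API and its container killed with a 10s grace period once the replacement reports ready, while machine-config-daemon-n22xz fails its liveness probe (connection refused on 127.0.0.1:8798) and is killed for restart with a 600s grace period. A sketch that surfaces failed probes and the container kills they precede (line shapes assumed from this log; not a kubelet interface):

// probe_failures.go - report failed probes and grace-period kills from a kubelet log.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	probeFailed = regexp.MustCompile(`"Probe failed" probeType="([^"]+)" pod="([^"]+)"`)
	killing     = regexp.MustCompile(`"Killing container with a grace period" pod="([^"]+)".* gracePeriod=(\d+)`)
)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		line := sc.Text()
		if m := probeFailed.FindStringSubmatch(line); m != nil {
			fmt.Printf("probe failed: %s on %s\n", m[1], m[2])
		}
		if m := killing.FindStringSubmatch(line); m != nil {
			fmt.Printf("killing: %s (grace %ss)\n", m[1], m[2])
		}
	}
}

Note the two kills are not equivalent: the dnsmasq kill comes from an API delete, the machine-config-daemon kill from the probe failure; only the latter pairs a "Probe failed" line with a kill on the same pod.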
(UID: \"bc460a0a-094d-4c9d-882f-3bd263f6a4c3\") " Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.435216 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms" (OuterVolumeSpecName: "kube-api-access-pw2ms") pod "bc460a0a-094d-4c9d-882f-3bd263f6a4c3" (UID: "bc460a0a-094d-4c9d-882f-3bd263f6a4c3"). InnerVolumeSpecName "kube-api-access-pw2ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.483671 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bc460a0a-094d-4c9d-882f-3bd263f6a4c3" (UID: "bc460a0a-094d-4c9d-882f-3bd263f6a4c3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.485344 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bc460a0a-094d-4c9d-882f-3bd263f6a4c3" (UID: "bc460a0a-094d-4c9d-882f-3bd263f6a4c3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.491131 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bc460a0a-094d-4c9d-882f-3bd263f6a4c3" (UID: "bc460a0a-094d-4c9d-882f-3bd263f6a4c3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.511573 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config" (OuterVolumeSpecName: "config") pod "bc460a0a-094d-4c9d-882f-3bd263f6a4c3" (UID: "bc460a0a-094d-4c9d-882f-3bd263f6a4c3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.530333 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw2ms\" (UniqueName: \"kubernetes.io/projected/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-kube-api-access-pw2ms\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.530712 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.533845 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.533868 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.533881 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc460a0a-094d-4c9d-882f-3bd263f6a4c3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.569550 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-8jhv7" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.742794 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" (UID: "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.745004 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data\") pod \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.745086 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8sv4\" (UniqueName: \"kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4\") pod \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.745145 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data\") pod \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.745212 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle\") pod \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\" (UID: \"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14\") " Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.746448 5021 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.748303 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4" (OuterVolumeSpecName: "kube-api-access-m8sv4") pod "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" (UID: "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14"). InnerVolumeSpecName "kube-api-access-m8sv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.777810 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" (UID: "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.809956 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data" (OuterVolumeSpecName: "config-data") pod "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" (UID: "c39660b0-c6d5-4b6e-95b8-12b8fbf38a14"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.848187 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8sv4\" (UniqueName: \"kubernetes.io/projected/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-kube-api-access-m8sv4\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.848225 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:12 crc kubenswrapper[5021]: I0121 15:44:12.848235 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.240478 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-8jhv7" Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.240667 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-8jhv7" event={"ID":"c39660b0-c6d5-4b6e-95b8-12b8fbf38a14","Type":"ContainerDied","Data":"3df90f1f0b422311705dfaad4ebece1648e025159178be7dfd313298fd90619c"} Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.241277 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3df90f1f0b422311705dfaad4ebece1648e025159178be7dfd313298fd90619c" Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.243540 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c" exitCode=0 Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.243634 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.243969 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c"}
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.244028 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583"}
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.244051 5021 scope.go:117] "RemoveContainer" containerID="2ea8d15572e39e256e507de01c714d92322ce002bb3c73880321feefeec92859"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.312577 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"]
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.335583 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-hmcx6"]
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783324 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"]
Jan 21 15:44:13 crc kubenswrapper[5021]: E0121 15:44:13.783653 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" containerName="glance-db-sync"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783666 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" containerName="glance-db-sync"
Jan 21 15:44:13 crc kubenswrapper[5021]: E0121 15:44:13.783678 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="init"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783684 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="init"
Jan 21 15:44:13 crc kubenswrapper[5021]: E0121 15:44:13.783698 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" containerName="mariadb-account-create-update"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783704 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" containerName="mariadb-account-create-update"
Jan 21 15:44:13 crc kubenswrapper[5021]: E0121 15:44:13.783727 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="dnsmasq-dns"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783733 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="dnsmasq-dns"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783880 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" containerName="glance-db-sync"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783898 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b7ebd6e-4ae4-4e24-b15f-b149b88ab987" containerName="mariadb-account-create-update"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.783930 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" containerName="dnsmasq-dns"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.784880 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.820092 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"]
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967002 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57l5c\" (UniqueName: \"kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967058 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967103 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967133 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967163 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:13 crc kubenswrapper[5021]: I0121 15:44:13.967185 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068710 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57l5c\" (UniqueName: \"kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068768 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068810 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068841 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068872 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.068893 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.069900 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.070009 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.070195 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.070561 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.070824 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.088046 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57l5c\" (UniqueName: \"kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c\") pod \"dnsmasq-dns-895cf5cf-kdjcl\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:14 crc kubenswrapper[5021]: I0121 15:44:14.100745 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:14.273223 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xtcpm" event={"ID":"9984d786-ae3e-4cfe-8bf6-099159dada65","Type":"ContainerStarted","Data":"c71262d50510701f84d14102a9000a8be2165c803d4f6eb3c3946ce234d084ae"}
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:14.308440 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-xtcpm" podStartSLOduration=13.996807147 podStartE2EDuration="45.308414958s" podCreationTimestamp="2026-01-21 15:43:29 +0000 UTC" firstStartedPulling="2026-01-21 15:43:41.881044226 +0000 UTC m=+1163.416158115" lastFinishedPulling="2026-01-21 15:44:13.192652037 +0000 UTC m=+1194.727765926" observedRunningTime="2026-01-21 15:44:14.299161049 +0000 UTC m=+1195.834274928" watchObservedRunningTime="2026-01-21 15:44:14.308414958 +0000 UTC m=+1195.843528857"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:14.750583 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc460a0a-094d-4c9d-882f-3bd263f6a4c3" path="/var/lib/kubelet/pods/bc460a0a-094d-4c9d-882f-3bd263f6a4c3/volumes"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.382364 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-t7cgc"]
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.384749 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.387666 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.393453 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-t7cgc"]
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.476940 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"]
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.508220 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q85hl\" (UniqueName: \"kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.508454 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.609960 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q85hl\" (UniqueName: \"kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.610764 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.611561 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.631672 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q85hl\" (UniqueName: \"kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl\") pod \"root-account-create-update-t7cgc\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") " pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:15 crc kubenswrapper[5021]: I0121 15:44:15.717464 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.160867 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-t7cgc"]
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.310113 5021 generic.go:334] "Generic (PLEG): container finished" podID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerID="33605eb0347a6eeebaefe897ab3d452fe1119172475dee1120d600aaeb3ba8db" exitCode=0
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.310223 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" event={"ID":"8411f55a-d436-4afe-a766-3f9f32fbdea5","Type":"ContainerDied","Data":"33605eb0347a6eeebaefe897ab3d452fe1119172475dee1120d600aaeb3ba8db"}
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.310517 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" event={"ID":"8411f55a-d436-4afe-a766-3f9f32fbdea5","Type":"ContainerStarted","Data":"9a1359499b4d36cac4b8c3c4df437e7b325e598ae94f07e1c39a8e0785939872"}
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.314355 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t7cgc" event={"ID":"3bfd21fb-7d79-4523-9626-3fcc93ff1db3","Type":"ContainerStarted","Data":"d210d69f9cddcd085b4561422c284ffe76fd5025c9fe545ad5a9114be9035384"}
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.314412 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t7cgc" event={"ID":"3bfd21fb-7d79-4523-9626-3fcc93ff1db3","Type":"ContainerStarted","Data":"65728d69b99d1207bab1707590a7acfbf21b3f90616b8706075ccd8440b7a68a"}
Jan 21 15:44:16 crc kubenswrapper[5021]: I0121 15:44:16.369319 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-t7cgc" podStartSLOduration=1.369301064 podStartE2EDuration="1.369301064s" podCreationTimestamp="2026-01-21 15:44:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:16.364571086 +0000 UTC m=+1197.899684995" watchObservedRunningTime="2026-01-21 15:44:16.369301064 +0000 UTC m=+1197.904414953"
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.324918 5021 generic.go:334] "Generic (PLEG): container finished" podID="9984d786-ae3e-4cfe-8bf6-099159dada65" containerID="c71262d50510701f84d14102a9000a8be2165c803d4f6eb3c3946ce234d084ae" exitCode=0
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.324995 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xtcpm" event={"ID":"9984d786-ae3e-4cfe-8bf6-099159dada65","Type":"ContainerDied","Data":"c71262d50510701f84d14102a9000a8be2165c803d4f6eb3c3946ce234d084ae"}
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.328616 5021 generic.go:334] "Generic (PLEG): container finished" podID="3bfd21fb-7d79-4523-9626-3fcc93ff1db3" containerID="d210d69f9cddcd085b4561422c284ffe76fd5025c9fe545ad5a9114be9035384" exitCode=0
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.328700 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t7cgc" event={"ID":"3bfd21fb-7d79-4523-9626-3fcc93ff1db3","Type":"ContainerDied","Data":"d210d69f9cddcd085b4561422c284ffe76fd5025c9fe545ad5a9114be9035384"}
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.331500 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" event={"ID":"8411f55a-d436-4afe-a766-3f9f32fbdea5","Type":"ContainerStarted","Data":"dfd4c48faeefcb7fd030cb2eeefcc956ec9fe6c993d10eab32610c40fdf47d2f"}
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.332372 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl"
Jan 21 15:44:17 crc kubenswrapper[5021]: I0121 15:44:17.365890 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" podStartSLOduration=4.36586863 podStartE2EDuration="4.36586863s" podCreationTimestamp="2026-01-21 15:44:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:17.362364336 +0000 UTC m=+1198.897478225" watchObservedRunningTime="2026-01-21 15:44:17.36586863 +0000 UTC m=+1198.900982519"
Jan 21 15:44:18 crc kubenswrapper[5021]: I0121 15:44:18.920961 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xtcpm"
Jan 21 15:44:18 crc kubenswrapper[5021]: I0121 15:44:18.929570 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.068504 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle\") pod \"9984d786-ae3e-4cfe-8bf6-099159dada65\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") "
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.069038 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q85hl\" (UniqueName: \"kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl\") pod \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") "
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.069638 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjmph\" (UniqueName: \"kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph\") pod \"9984d786-ae3e-4cfe-8bf6-099159dada65\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") "
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.069686 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts\") pod \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\" (UID: \"3bfd21fb-7d79-4523-9626-3fcc93ff1db3\") "
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.070030 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data\") pod \"9984d786-ae3e-4cfe-8bf6-099159dada65\" (UID: \"9984d786-ae3e-4cfe-8bf6-099159dada65\") "
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.070483 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3bfd21fb-7d79-4523-9626-3fcc93ff1db3" (UID: "3bfd21fb-7d79-4523-9626-3fcc93ff1db3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.070862 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.076207 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph" (OuterVolumeSpecName: "kube-api-access-tjmph") pod "9984d786-ae3e-4cfe-8bf6-099159dada65" (UID: "9984d786-ae3e-4cfe-8bf6-099159dada65"). InnerVolumeSpecName "kube-api-access-tjmph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.076270 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl" (OuterVolumeSpecName: "kube-api-access-q85hl") pod "3bfd21fb-7d79-4523-9626-3fcc93ff1db3" (UID: "3bfd21fb-7d79-4523-9626-3fcc93ff1db3"). InnerVolumeSpecName "kube-api-access-q85hl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.094244 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9984d786-ae3e-4cfe-8bf6-099159dada65" (UID: "9984d786-ae3e-4cfe-8bf6-099159dada65"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.130440 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data" (OuterVolumeSpecName: "config-data") pod "9984d786-ae3e-4cfe-8bf6-099159dada65" (UID: "9984d786-ae3e-4cfe-8bf6-099159dada65"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.172889 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.172995 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9984d786-ae3e-4cfe-8bf6-099159dada65-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.173012 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q85hl\" (UniqueName: \"kubernetes.io/projected/3bfd21fb-7d79-4523-9626-3fcc93ff1db3-kube-api-access-q85hl\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.173029 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjmph\" (UniqueName: \"kubernetes.io/projected/9984d786-ae3e-4cfe-8bf6-099159dada65-kube-api-access-tjmph\") on node \"crc\" DevicePath \"\""
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.349555 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-t7cgc"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.349547 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-t7cgc" event={"ID":"3bfd21fb-7d79-4523-9626-3fcc93ff1db3","Type":"ContainerDied","Data":"65728d69b99d1207bab1707590a7acfbf21b3f90616b8706075ccd8440b7a68a"}
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.349688 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65728d69b99d1207bab1707590a7acfbf21b3f90616b8706075ccd8440b7a68a"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.351639 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-xtcpm" event={"ID":"9984d786-ae3e-4cfe-8bf6-099159dada65","Type":"ContainerDied","Data":"f28ddf1e604d407678108f86c506c7a482d69f19116d88e07e9c4402ce32263a"}
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.351675 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f28ddf1e604d407678108f86c506c7a482d69f19116d88e07e9c4402ce32263a"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.351721 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-xtcpm"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.655036 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7k8dl"]
Jan 21 15:44:19 crc kubenswrapper[5021]: E0121 15:44:19.655441 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bfd21fb-7d79-4523-9626-3fcc93ff1db3" containerName="mariadb-account-create-update"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.655461 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bfd21fb-7d79-4523-9626-3fcc93ff1db3" containerName="mariadb-account-create-update"
Jan 21 15:44:19 crc kubenswrapper[5021]: E0121 15:44:19.655500 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65" containerName="keystone-db-sync"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.655509 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65" containerName="keystone-db-sync"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.655690 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65" containerName="keystone-db-sync"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.655709 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bfd21fb-7d79-4523-9626-3fcc93ff1db3" containerName="mariadb-account-create-update"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.661124 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.672741 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.673055 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.673270 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.673430 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8kl7s"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.675732 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.691051 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7k8dl"]
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.787300 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"]
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792596 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792689 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792726 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792797 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792866 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.792930 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw7wd\" (UniqueName: \"kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.849094 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"]
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.851235 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.874393 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"]
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895293 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895384 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895428 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnhl5\" (UniqueName: \"kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895482 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895510 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895544 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895655 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895708 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw7wd\" (UniqueName: \"kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895732 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895897 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.895988 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.896013 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.907139 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.908431 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.909521 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.910152 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.912520 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.927087 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw7wd\" (UniqueName: \"kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd\") pod \"keystone-bootstrap-7k8dl\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " pod="openstack/keystone-bootstrap-7k8dl"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.951833 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-gm6fx"]
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.953249 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-gm6fx"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.965038 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gc7s6"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.965357 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.965542 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Jan 21 15:44:19 crc kubenswrapper[5021]: I0121 15:44:19.969971 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-gm6fx"]
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.001862 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.003811 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.007815 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx"
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.007882 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.007952 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnplq\" (UniqueName: \"kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx"
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.007983 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnhl5\" (UniqueName: \"kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk"
Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.008017 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx"
\"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.008044 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.008088 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.008124 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.008145 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.009056 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.009792 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.010557 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.010748 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.011084 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.011320 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 21 
15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.011739 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.043099 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.049615 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnhl5\" (UniqueName: \"kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5\") pod \"dnsmasq-dns-6c9c9f998c-9twkk\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.057742 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-995xv"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.069265 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.072588 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.072804 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.080101 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-gft5b" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109816 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109873 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109894 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109928 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109950 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnplq\" (UniqueName: \"kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq\") pod 
\"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109978 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.109999 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.110015 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44t5h\" (UniqueName: \"kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.110066 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.110082 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.115545 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.121005 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-995xv"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.123526 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.154186 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnplq\" (UniqueName: \"kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq\") pod \"neutron-db-sync-gm6fx\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.154251 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-x6jrz"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.155244 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.155347 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7k8dl" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.158114 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-dzpf7" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.158558 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.187835 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.195023 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x6jrz"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.203980 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-fxqns"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.205381 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211353 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211434 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211483 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql7wj\" (UniqueName: \"kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211508 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211554 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211587 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " 
pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211615 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211644 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211674 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211719 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211748 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44t5h\" (UniqueName: \"kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211773 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211804 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211855 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211891 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zghrd\" (UniqueName: \"kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.211940 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.212479 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.220591 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.224396 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.225575 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-krlrd" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.225853 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.226305 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fxqns"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.226754 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.230236 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.237751 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.250055 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.259166 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.297282 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44t5h\" (UniqueName: \"kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h\") pod \"ceilometer-0\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") " pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316190 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316234 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316267 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316292 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316312 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gktpv\" (UniqueName: \"kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316333 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql7wj\" (UniqueName: \"kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316352 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316380 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316409 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316438 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316458 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316478 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316514 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.316539 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zghrd\" (UniqueName: \"kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.327809 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.327890 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.329404 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.343372 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.344450 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.372049 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="dnsmasq-dns" containerID="cri-o://dfd4c48faeefcb7fd030cb2eeefcc956ec9fe6c993d10eab32610c40fdf47d2f" gracePeriod=10 Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.374345 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.374698 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.375140 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.375837 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.380978 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zghrd\" (UniqueName: \"kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.394642 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql7wj\" (UniqueName: \"kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj\") pod \"cinder-db-sync-995xv\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.394717 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.397815 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle\") pod \"barbican-db-sync-x6jrz\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") " pod="openstack/barbican-db-sync-x6jrz" Jan 21 
15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.405557 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.433560 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448206 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448340 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448467 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448587 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448667 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448739 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448851 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.448994 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data\") pod 
\"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.449072 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gktpv\" (UniqueName: \"kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.449205 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vmn7\" (UniqueName: \"kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.437395 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.451194 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.454006 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.457393 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.485817 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gktpv\" (UniqueName: \"kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv\") pod \"placement-db-sync-fxqns\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.528869 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-995xv" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.558928 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.560377 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.560476 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.560335 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.561199 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.562384 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.562542 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.562630 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.562779 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vmn7\" (UniqueName: \"kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 
15:44:20.563794 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.564552 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.582306 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.598201 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vmn7\" (UniqueName: \"kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7\") pod \"dnsmasq-dns-57c957c4ff-tjlrx\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.637136 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fxqns" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.715679 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.863272 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7k8dl"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.863360 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:20 crc kubenswrapper[5021]: W0121 15:44:20.873135 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf72eda61_8750_46bd_88ce_355f8b36acc7.slice/crio-260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce WatchSource:0}: Error finding container 260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce: Status 404 returned error can't find the container with id 260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.887541 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.887662 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.903768 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.906142 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d767q" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.906230 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 21 15:44:20 crc kubenswrapper[5021]: I0121 15:44:20.918482 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.003573 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.006074 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.007470 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.007639 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.007751 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.007850 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.006203 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.008000 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.008759 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.008867 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c55m\" (UniqueName: \"kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.012313 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.021437 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.022396 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.102637 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.109887 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c55m\" (UniqueName: \"kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.109955 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110031 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110074 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " 
pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110105 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52xs8\" (UniqueName: \"kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110137 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110167 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110189 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110207 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110228 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110263 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110921 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110955 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.110990 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.111006 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.111558 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.111612 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.111697 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.111711 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.125309 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.127496 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.129668 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 
15:44:21.132996 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.148986 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.149233 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c55m\" (UniqueName: \"kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m\") pod \"glance-default-external-api-0\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.215001 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.216182 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.216561 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.216931 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52xs8\" (UniqueName: \"kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.216973 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.217004 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.217026 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.217086 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.217115 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.217336 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.220516 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.231797 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.232865 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.250875 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.251491 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.259150 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52xs8\" (UniqueName: \"kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8\") pod 
\"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.296671 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.366534 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.388784 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7k8dl" event={"ID":"f72eda61-8750-46bd-88ce-355f8b36acc7","Type":"ContainerStarted","Data":"260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce"} Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.395546 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" event={"ID":"bec74521-464f-4b1e-bf55-7d2434aa4c10","Type":"ContainerStarted","Data":"facd119ebcc35ed55ddca372612dbf3ab114acd195deab2c0264f5715e1edd29"} Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.398956 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.399175 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerStarted","Data":"b91a2c7ec6e85ea619e3e07e8a7d69a7b6ee1f11f457ae5355df2a8fd867f6c7"} Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.405174 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.637989 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.659575 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-gm6fx"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.668265 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x6jrz"] Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.678655 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-995xv"] Jan 21 15:44:21 crc kubenswrapper[5021]: W0121 15:44:21.702418 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfa74c15_9387_4e75_8597_8ec28d5fea39.slice/crio-eb2ee5a9b475340c216165e1e754c2582dfa35e9d93cb1d5f3c4628b832dc193 WatchSource:0}: Error finding container eb2ee5a9b475340c216165e1e754c2582dfa35e9d93cb1d5f3c4628b832dc193: Status 404 returned error can't find the container with id eb2ee5a9b475340c216165e1e754c2582dfa35e9d93cb1d5f3c4628b832dc193 Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.786992 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fxqns"] Jan 21 15:44:21 crc kubenswrapper[5021]: W0121 15:44:21.794573 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b6d6a49_2772_4d64_a665_618dfc7e2035.slice/crio-33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5 WatchSource:0}: Error finding container 33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5: Status 404 returned error can't find the container with id 33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5 Jan 21 15:44:21 crc kubenswrapper[5021]: I0121 15:44:21.897049 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:21 crc kubenswrapper[5021]: W0121 15:44:21.898142 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2b22e1b_3ad7_4028_9c61_59365c57bd4b.slice/crio-b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111 WatchSource:0}: Error finding container b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111: Status 404 returned error can't find the container with id b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111 Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.134456 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.414156 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fxqns" event={"ID":"6b6d6a49-2772-4d64-a665-618dfc7e2035","Type":"ContainerStarted","Data":"33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.416342 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerStarted","Data":"b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.419173 5021 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/barbican-db-sync-x6jrz" event={"ID":"52f108a9-a567-4074-88db-05c8c2feea41","Type":"ContainerStarted","Data":"cb1a8f1cbbd9151698bb92dbab98cb8004523fcd3ebf38b406475d5452855d34"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.420870 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" event={"ID":"bfa74c15-9387-4e75-8597-8ec28d5fea39","Type":"ContainerStarted","Data":"eb2ee5a9b475340c216165e1e754c2582dfa35e9d93cb1d5f3c4628b832dc193"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.422680 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-995xv" event={"ID":"e369fc7c-044b-47cc-964f-601d7c06f150","Type":"ContainerStarted","Data":"f993a4696d15902b0ea64e9d041d0d825a69de9e4e7ec0d11929ce1dac2b6193"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.427647 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerStarted","Data":"30559304ec4940eca42b34a1e87f2a12837f618c680caee2244ad5b1cc78fad7"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.435405 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gm6fx" event={"ID":"cb49e3b7-78e5-4094-9bf0-d25f350d70a2","Type":"ContainerStarted","Data":"3b94e4226aa112d7f8b06e17e98c660f45a98d12937ad51b10b9f633028499ec"} Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.860465 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.883542 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:44:22 crc kubenswrapper[5021]: I0121 15:44:22.954614 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.102281 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.139:5353: connect: connection refused" Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.541213 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerStarted","Data":"2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.543673 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gm6fx" event={"ID":"cb49e3b7-78e5-4094-9bf0-d25f350d70a2","Type":"ContainerStarted","Data":"4da65adcb8ccfeb9a613436dbfa586305e06ea8f16f79bf23435b620e6b5c598"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.546929 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerStarted","Data":"1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.572756 5021 generic.go:334] "Generic (PLEG): container finished" podID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerID="dfd4c48faeefcb7fd030cb2eeefcc956ec9fe6c993d10eab32610c40fdf47d2f" exitCode=0 Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.572805 5021 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" event={"ID":"8411f55a-d436-4afe-a766-3f9f32fbdea5","Type":"ContainerDied","Data":"dfd4c48faeefcb7fd030cb2eeefcc956ec9fe6c993d10eab32610c40fdf47d2f"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.580157 5021 generic.go:334] "Generic (PLEG): container finished" podID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerID="83989d4993d161ae52f992021ec35d82a1a4a79be3a4760f4eb044f8cf112c45" exitCode=0 Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.580359 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" event={"ID":"bfa74c15-9387-4e75-8597-8ec28d5fea39","Type":"ContainerDied","Data":"83989d4993d161ae52f992021ec35d82a1a4a79be3a4760f4eb044f8cf112c45"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.589444 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7k8dl" event={"ID":"f72eda61-8750-46bd-88ce-355f8b36acc7","Type":"ContainerStarted","Data":"5819573d359e2d9a1b65039d98dc4e760af3f22687fb7cd64949bd947e539de2"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.604330 5021 generic.go:334] "Generic (PLEG): container finished" podID="bec74521-464f-4b1e-bf55-7d2434aa4c10" containerID="7cf923c91ca02aef25d6abc95eadb6002daf816bebc497f2955ff9354722d8a9" exitCode=0 Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.604375 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" event={"ID":"bec74521-464f-4b1e-bf55-7d2434aa4c10","Type":"ContainerDied","Data":"7cf923c91ca02aef25d6abc95eadb6002daf816bebc497f2955ff9354722d8a9"} Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.611415 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-gm6fx" podStartSLOduration=5.611396637 podStartE2EDuration="5.611396637s" podCreationTimestamp="2026-01-21 15:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:24.570185129 +0000 UTC m=+1206.105299018" watchObservedRunningTime="2026-01-21 15:44:24.611396637 +0000 UTC m=+1206.146510526" Jan 21 15:44:24 crc kubenswrapper[5021]: I0121 15:44:24.629425 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7k8dl" podStartSLOduration=5.629408641 podStartE2EDuration="5.629408641s" podCreationTimestamp="2026-01-21 15:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:24.628222569 +0000 UTC m=+1206.163336458" watchObservedRunningTime="2026-01-21 15:44:24.629408641 +0000 UTC m=+1206.164522530" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.112516 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.174552 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218591 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218707 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218753 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218781 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218808 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218854 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218889 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.218991 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.219084 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.219112 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57l5c\" (UniqueName: \"kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" 
(UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.219203 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnhl5\" (UniqueName: \"kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5\") pod \"bec74521-464f-4b1e-bf55-7d2434aa4c10\" (UID: \"bec74521-464f-4b1e-bf55-7d2434aa4c10\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.219262 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config\") pod \"8411f55a-d436-4afe-a766-3f9f32fbdea5\" (UID: \"8411f55a-d436-4afe-a766-3f9f32fbdea5\") " Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.235482 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c" (OuterVolumeSpecName: "kube-api-access-57l5c") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "kube-api-access-57l5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.268954 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5" (OuterVolumeSpecName: "kube-api-access-cnhl5") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "kube-api-access-cnhl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.301014 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.311852 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.317021 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.321104 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57l5c\" (UniqueName: \"kubernetes.io/projected/8411f55a-d436-4afe-a766-3f9f32fbdea5-kube-api-access-57l5c\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.321415 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnhl5\" (UniqueName: \"kubernetes.io/projected/bec74521-464f-4b1e-bf55-7d2434aa4c10-kube-api-access-cnhl5\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.321494 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.321561 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.321679 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.330344 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config" (OuterVolumeSpecName: "config") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.374148 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bec74521-464f-4b1e-bf55-7d2434aa4c10" (UID: "bec74521-464f-4b1e-bf55-7d2434aa4c10"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.383009 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.390394 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config" (OuterVolumeSpecName: "config") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.395160 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.398376 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.420454 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8411f55a-d436-4afe-a766-3f9f32fbdea5" (UID: "8411f55a-d436-4afe-a766-3f9f32fbdea5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425434 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425470 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425481 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425492 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425505 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425517 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8411f55a-d436-4afe-a766-3f9f32fbdea5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.425526 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bec74521-464f-4b1e-bf55-7d2434aa4c10-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.623514 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.623542 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-9twkk" event={"ID":"bec74521-464f-4b1e-bf55-7d2434aa4c10","Type":"ContainerDied","Data":"facd119ebcc35ed55ddca372612dbf3ab114acd195deab2c0264f5715e1edd29"} Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.623653 5021 scope.go:117] "RemoveContainer" containerID="7cf923c91ca02aef25d6abc95eadb6002daf816bebc497f2955ff9354722d8a9" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.627984 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.628044 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-kdjcl" event={"ID":"8411f55a-d436-4afe-a766-3f9f32fbdea5","Type":"ContainerDied","Data":"9a1359499b4d36cac4b8c3c4df437e7b325e598ae94f07e1c39a8e0785939872"} Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.635632 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" event={"ID":"bfa74c15-9387-4e75-8597-8ec28d5fea39","Type":"ContainerStarted","Data":"032a2db9ac8fdcf3e941b9b7c502bf1b6ae493a0a8760720e35e9f8f5adf6882"} Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.636461 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.686977 5021 scope.go:117] "RemoveContainer" containerID="dfd4c48faeefcb7fd030cb2eeefcc956ec9fe6c993d10eab32610c40fdf47d2f" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.709583 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" podStartSLOduration=5.709558735 podStartE2EDuration="5.709558735s" podCreationTimestamp="2026-01-21 15:44:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:25.683393632 +0000 UTC m=+1207.218507521" watchObservedRunningTime="2026-01-21 15:44:25.709558735 +0000 UTC m=+1207.244672644" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.710565 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"] Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.720348 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-kdjcl"] Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.750778 5021 scope.go:117] "RemoveContainer" containerID="33605eb0347a6eeebaefe897ab3d452fe1119172475dee1120d600aaeb3ba8db" Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.802609 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"] Jan 21 15:44:25 crc kubenswrapper[5021]: I0121 15:44:25.837402 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-9twkk"] Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.648205 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerStarted","Data":"6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114"} Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.648297 5021 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-log" containerID="cri-o://2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3" gracePeriod=30 Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.648581 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-httpd" containerID="cri-o://6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114" gracePeriod=30 Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.655698 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerStarted","Data":"9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7"} Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.655930 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-log" containerID="cri-o://1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97" gracePeriod=30 Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.656314 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-httpd" containerID="cri-o://9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7" gracePeriod=30 Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.683569 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.683548645 podStartE2EDuration="7.683548645s" podCreationTimestamp="2026-01-21 15:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:26.668338136 +0000 UTC m=+1208.203452025" watchObservedRunningTime="2026-01-21 15:44:26.683548645 +0000 UTC m=+1208.218662534" Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.755996 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" path="/var/lib/kubelet/pods/8411f55a-d436-4afe-a766-3f9f32fbdea5/volumes" Jan 21 15:44:26 crc kubenswrapper[5021]: I0121 15:44:26.756802 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bec74521-464f-4b1e-bf55-7d2434aa4c10" path="/var/lib/kubelet/pods/bec74521-464f-4b1e-bf55-7d2434aa4c10/volumes" Jan 21 15:44:27 crc kubenswrapper[5021]: E0121 15:44:27.060650 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ecd501c_7632_497e_9f32_5ca8b60ea2d2.slice/crio-6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ecd501c_7632_497e_9f32_5ca8b60ea2d2.slice/crio-conmon-2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2b22e1b_3ad7_4028_9c61_59365c57bd4b.slice/crio-9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2b22e1b_3ad7_4028_9c61_59365c57bd4b.slice/crio-conmon-1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.677304 5021 generic.go:334] "Generic (PLEG): container finished" podID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerID="6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114" exitCode=0 Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.677332 5021 generic.go:334] "Generic (PLEG): container finished" podID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerID="2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3" exitCode=143 Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.677373 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerDied","Data":"6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114"} Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.677412 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerDied","Data":"2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3"} Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.680200 5021 generic.go:334] "Generic (PLEG): container finished" podID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerID="9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7" exitCode=0 Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.680218 5021 generic.go:334] "Generic (PLEG): container finished" podID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerID="1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97" exitCode=143 Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.680232 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerDied","Data":"9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7"} Jan 21 15:44:27 crc kubenswrapper[5021]: I0121 15:44:27.680245 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerDied","Data":"1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97"} Jan 21 15:44:28 crc kubenswrapper[5021]: I0121 15:44:28.771568 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.771553419 podStartE2EDuration="9.771553419s" podCreationTimestamp="2026-01-21 15:44:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:26.69230416 +0000 UTC m=+1208.227418049" watchObservedRunningTime="2026-01-21 15:44:28.771553419 +0000 UTC m=+1210.306667308" Jan 21 15:44:29 crc kubenswrapper[5021]: I0121 15:44:29.699011 5021 generic.go:334] "Generic (PLEG): container finished" podID="f72eda61-8750-46bd-88ce-355f8b36acc7" 
containerID="5819573d359e2d9a1b65039d98dc4e760af3f22687fb7cd64949bd947e539de2" exitCode=0 Jan 21 15:44:29 crc kubenswrapper[5021]: I0121 15:44:29.699063 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7k8dl" event={"ID":"f72eda61-8750-46bd-88ce-355f8b36acc7","Type":"ContainerDied","Data":"5819573d359e2d9a1b65039d98dc4e760af3f22687fb7cd64949bd947e539de2"} Jan 21 15:44:30 crc kubenswrapper[5021]: I0121 15:44:30.719179 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:44:30 crc kubenswrapper[5021]: I0121 15:44:30.780881 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:30 crc kubenswrapper[5021]: I0121 15:44:30.782772 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="dnsmasq-dns" containerID="cri-o://93f595b09f88348355e1e3d069b9cbd62a7fe5a5d5da20d2e57498fab1e77609" gracePeriod=10 Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.757631 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7ecd501c-7632-497e-9f32-5ca8b60ea2d2","Type":"ContainerDied","Data":"30559304ec4940eca42b34a1e87f2a12837f618c680caee2244ad5b1cc78fad7"} Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.757683 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30559304ec4940eca42b34a1e87f2a12837f618c680caee2244ad5b1cc78fad7" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.765678 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.767154 5021 generic.go:334] "Generic (PLEG): container finished" podID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerID="93f595b09f88348355e1e3d069b9cbd62a7fe5a5d5da20d2e57498fab1e77609" exitCode=0 Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.767231 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerDied","Data":"93f595b09f88348355e1e3d069b9cbd62a7fe5a5d5da20d2e57498fab1e77609"} Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.778109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2b22e1b-3ad7-4028-9c61-59365c57bd4b","Type":"ContainerDied","Data":"b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111"} Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.778149 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1501fc0076db307445e1f7392c17f670a47faaead5b04c409eb6980a4d63111" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.827544 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.835932 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867036 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867084 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867111 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867127 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867144 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867183 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867241 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c55m\" (UniqueName: \"kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867292 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867334 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52xs8\" (UniqueName: \"kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867370 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc 
kubenswrapper[5021]: I0121 15:44:31.867410 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867437 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867462 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867495 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867535 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs\") pod \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\" (UID: \"a2b22e1b-3ad7-4028-9c61-59365c57bd4b\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.867552 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle\") pod \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\" (UID: \"7ecd501c-7632-497e-9f32-5ca8b60ea2d2\") " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.871001 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs" (OuterVolumeSpecName: "logs") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.871551 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.883373 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "httpd-run". 
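The burst of UnmountVolume lines for both glance pods follows directly from the reconciler model: once SyncLoop DELETE removed the pods from the desired state of world, every volume still present in the actual state gets an unmount operation. A toy single pass of that diff (the real loop is the volume manager's reconciler; names are illustrative):

package main

import "fmt"

// One pass of a toy volume reconciler: anything mounted (actual) but no
// longer wanted (desired) is unmounted; anything wanted but not yet
// mounted is mounted.
func reconcile(desired, actual map[string]bool) {
	for v := range actual {
		if !desired[v] {
			fmt.Println("operationExecutor.UnmountVolume started for volume", v)
		}
	}
	for v := range desired {
		if !actual[v] {
			fmt.Println("operationExecutor.MountVolume started for volume", v)
		}
	}
}

func main() {
	// After SyncLoop DELETE for glance-default-internal-api-0, its volumes
	// drop out of the desired state while still mounted in the actual state.
	reconcile(map[string]bool{},
		map[string]bool{"config-data": true, "scripts": true, "logs": true, "glance": true})
}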
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.884706 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs" (OuterVolumeSpecName: "logs") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.905564 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m" (OuterVolumeSpecName: "kube-api-access-6c55m") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "kube-api-access-6c55m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.914598 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8" (OuterVolumeSpecName: "kube-api-access-52xs8") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "kube-api-access-52xs8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.933979 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.940995 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts" (OuterVolumeSpecName: "scripts") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.949320 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts" (OuterVolumeSpecName: "scripts") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.971491 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.971799 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.971956 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972049 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972145 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972226 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972298 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c55m\" (UniqueName: \"kubernetes.io/projected/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-kube-api-access-6c55m\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972387 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972466 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52xs8\" (UniqueName: \"kubernetes.io/projected/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-kube-api-access-52xs8\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:31 crc kubenswrapper[5021]: I0121 15:44:31.972143 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.049091 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.071968 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.074193 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.074219 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.074231 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.089681 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.097086 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data" (OuterVolumeSpecName: "config-data") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.111269 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.122040 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.147106 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data" (OuterVolumeSpecName: "config-data") pod "7ecd501c-7632-497e-9f32-5ca8b60ea2d2" (UID: "7ecd501c-7632-497e-9f32-5ca8b60ea2d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.150707 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a2b22e1b-3ad7-4028-9c61-59365c57bd4b" (UID: "a2b22e1b-3ad7-4028-9c61-59365c57bd4b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179151 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179204 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179220 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179233 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179244 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ecd501c-7632-497e-9f32-5ca8b60ea2d2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.179257 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2b22e1b-3ad7-4028-9c61-59365c57bd4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.787638 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.787688 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.834713 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.844535 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.862584 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.871491 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879172 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879650 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879672 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879686 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="init" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879694 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="init" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879709 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879716 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879730 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879737 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879753 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879761 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879774 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="dnsmasq-dns" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879780 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="dnsmasq-dns" Jan 21 15:44:32 crc kubenswrapper[5021]: E0121 15:44:32.879811 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bec74521-464f-4b1e-bf55-7d2434aa4c10" containerName="init" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.879819 5021 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bec74521-464f-4b1e-bf55-7d2434aa4c10" containerName="init" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880018 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880034 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-log" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880051 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880063 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" containerName="glance-httpd" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880085 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="8411f55a-d436-4afe-a766-3f9f32fbdea5" containerName="dnsmasq-dns" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.880119 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="bec74521-464f-4b1e-bf55-7d2434aa4c10" containerName="init" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.881266 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.884924 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.885972 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.886139 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d767q" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.886205 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.887205 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.888660 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.896481 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.896743 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.920999 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:32 crc kubenswrapper[5021]: I0121 15:44:32.976590 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999738 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999802 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999834 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999897 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999953 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:32.999983 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.000057 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " 
pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.000109 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft5j8\" (UniqueName: \"kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102224 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102341 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft5j8\" (UniqueName: \"kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102407 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102442 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102470 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102499 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22m72\" (UniqueName: \"kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102526 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102554 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " 
pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102586 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102650 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102685 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102716 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102745 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102772 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102796 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.102821 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.105237 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") device mount path 
\"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.105499 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.105869 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.110561 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.111012 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.111534 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.118040 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.123035 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft5j8\" (UniqueName: \"kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.142654 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204513 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204635 5021 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-22m72\" (UniqueName: \"kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204672 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204711 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204745 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204765 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204780 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.204861 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.205179 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.205220 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.205265 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.211403 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.211971 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.212502 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.213116 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.227503 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.236655 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22m72\" (UniqueName: \"kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72\") pod \"glance-default-external-api-0\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") " pod="openstack/glance-default-external-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.255396 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:44:33 crc kubenswrapper[5021]: I0121 15:44:33.269252 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:44:34 crc kubenswrapper[5021]: I0121 15:44:34.747602 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ecd501c-7632-497e-9f32-5ca8b60ea2d2" path="/var/lib/kubelet/pods/7ecd501c-7632-497e-9f32-5ca8b60ea2d2/volumes" Jan 21 15:44:34 crc kubenswrapper[5021]: I0121 15:44:34.748844 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2b22e1b-3ad7-4028-9c61-59365c57bd4b" path="/var/lib/kubelet/pods/a2b22e1b-3ad7-4028-9c61-59365c57bd4b/volumes" Jan 21 15:44:35 crc kubenswrapper[5021]: I0121 15:44:35.992275 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7k8dl" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.164153 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.165669 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.165724 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kw7wd\" (UniqueName: \"kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.165827 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.165863 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.165889 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle\") pod \"f72eda61-8750-46bd-88ce-355f8b36acc7\" (UID: \"f72eda61-8750-46bd-88ce-355f8b36acc7\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.172178 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.172204 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd" (OuterVolumeSpecName: "kube-api-access-kw7wd") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "kube-api-access-kw7wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.176928 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.185489 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts" (OuterVolumeSpecName: "scripts") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.198649 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data" (OuterVolumeSpecName: "config-data") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.205741 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f72eda61-8750-46bd-88ce-355f8b36acc7" (UID: "f72eda61-8750-46bd-88ce-355f8b36acc7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268127 5021 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268158 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kw7wd\" (UniqueName: \"kubernetes.io/projected/f72eda61-8750-46bd-88ce-355f8b36acc7-kube-api-access-kw7wd\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268169 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268178 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268186 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.268195 5021 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f72eda61-8750-46bd-88ce-355f8b36acc7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.447380 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.576879 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.576957 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.576996 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx7lg\" (UniqueName: \"kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.577025 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.577161 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.577187 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0\") pod \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\" (UID: \"6722dc85-c75c-4751-b0c9-1a7ffc45afaa\") " Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.582728 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg" (OuterVolumeSpecName: "kube-api-access-wx7lg") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "kube-api-access-wx7lg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.624634 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.624787 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config" (OuterVolumeSpecName: "config") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.624994 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.625409 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.625398 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6722dc85-c75c-4751-b0c9-1a7ffc45afaa" (UID: "6722dc85-c75c-4751-b0c9-1a7ffc45afaa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.678958 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.679003 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.679022 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.679034 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.679045 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx7lg\" (UniqueName: \"kubernetes.io/projected/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-kube-api-access-wx7lg\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.679056 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6722dc85-c75c-4751-b0c9-1a7ffc45afaa-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.821020 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7k8dl" event={"ID":"f72eda61-8750-46bd-88ce-355f8b36acc7","Type":"ContainerDied","Data":"260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce"} Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.821050 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7k8dl" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.821059 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="260db40410ee645b5827bbaa708a5a36009fc69e8e2ada09c1b37064603c15ce" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.822968 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" event={"ID":"6722dc85-c75c-4751-b0c9-1a7ffc45afaa","Type":"ContainerDied","Data":"4738c4c592d9f065ef6f69c26fe3308b6908e1cda1ada00a18599d5294dd65aa"} Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.823019 5021 scope.go:117] "RemoveContainer" containerID="93f595b09f88348355e1e3d069b9cbd62a7fe5a5d5da20d2e57498fab1e77609" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.823042 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kprj2" Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.846399 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:36 crc kubenswrapper[5021]: I0121 15:44:36.856073 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kprj2"] Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.066867 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7k8dl"] Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.074467 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7k8dl"] Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165281 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gcqqr"] Jan 21 15:44:37 crc kubenswrapper[5021]: E0121 15:44:37.165675 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="init" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165691 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="init" Jan 21 15:44:37 crc kubenswrapper[5021]: E0121 15:44:37.165709 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="dnsmasq-dns" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165716 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="dnsmasq-dns" Jan 21 15:44:37 crc kubenswrapper[5021]: E0121 15:44:37.165742 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f72eda61-8750-46bd-88ce-355f8b36acc7" containerName="keystone-bootstrap" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165751 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f72eda61-8750-46bd-88ce-355f8b36acc7" containerName="keystone-bootstrap" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165950 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f72eda61-8750-46bd-88ce-355f8b36acc7" containerName="keystone-bootstrap" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.165971 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" containerName="dnsmasq-dns" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.166488 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.168847 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.169132 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.169330 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.169514 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8kl7s" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.169690 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.184042 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gcqqr"] Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.288801 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.289086 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.289251 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.289278 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.289786 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpg5q\" (UniqueName: \"kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.289994 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391676 5021 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391736 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391757 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391787 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpg5q\" (UniqueName: \"kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391834 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.391885 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.397799 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.398202 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.398692 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.399395 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys\") pod \"keystone-bootstrap-gcqqr\" (UID: 
\"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.406328 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.409721 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpg5q\" (UniqueName: \"kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q\") pod \"keystone-bootstrap-gcqqr\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:37 crc kubenswrapper[5021]: I0121 15:44:37.493491 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:44:38 crc kubenswrapper[5021]: I0121 15:44:38.748687 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6722dc85-c75c-4751-b0c9-1a7ffc45afaa" path="/var/lib/kubelet/pods/6722dc85-c75c-4751-b0c9-1a7ffc45afaa/volumes" Jan 21 15:44:38 crc kubenswrapper[5021]: I0121 15:44:38.749883 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f72eda61-8750-46bd-88ce-355f8b36acc7" path="/var/lib/kubelet/pods/f72eda61-8750-46bd-88ce-355f8b36acc7/volumes" Jan 21 15:44:44 crc kubenswrapper[5021]: E0121 15:44:44.513000 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 21 15:44:44 crc kubenswrapper[5021]: E0121 15:44:44.513488 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zghrd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
Jan 21 15:44:44 crc kubenswrapper[5021]: E0121 15:44:44.515050 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-x6jrz" podUID="52f108a9-a567-4074-88db-05c8c2feea41"
Jan 21 15:44:44 crc kubenswrapper[5021]: E0121 15:44:44.889787 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-x6jrz" podUID="52f108a9-a567-4074-88db-05c8c2feea41"
Jan 21 15:44:45 crc kubenswrapper[5021]: I0121 15:44:45.543523 5021 scope.go:117] "RemoveContainer" containerID="c3966764226e28e67ed97336fca66009bbc265e4b7084b130fa49364be513ede"
Jan 21 15:44:45 crc kubenswrapper[5021]: E0121 15:44:45.564102 5021 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Jan 21 15:44:45 crc kubenswrapper[5021]: E0121 15:44:45.564270 5021 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ql7wj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-995xv_openstack(e369fc7c-044b-47cc-964f-601d7c06f150): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 21 15:44:45 crc kubenswrapper[5021]: E0121 15:44:45.566300 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-995xv" podUID="e369fc7c-044b-47cc-964f-601d7c06f150"
Jan 21 15:44:45 crc kubenswrapper[5021]: I0121 15:44:45.899626 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerStarted","Data":"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8"}
Jan 21 15:44:45 crc kubenswrapper[5021]: I0121 15:44:45.906821 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fxqns" event={"ID":"6b6d6a49-2772-4d64-a665-618dfc7e2035","Type":"ContainerStarted","Data":"120163cff46fcf4d06479ef6e02fb2122698e67e2d06d3253bcc4438b94f5573"}
Jan 21 15:44:45 crc kubenswrapper[5021]: E0121 15:44:45.908010 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-995xv" podUID="e369fc7c-044b-47cc-964f-601d7c06f150"
Jan 21 15:44:45 crc kubenswrapper[5021]: I0121 15:44:45.940124 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-fxqns" podStartSLOduration=2.223871831 podStartE2EDuration="25.940106841s" podCreationTimestamp="2026-01-21 15:44:20 +0000 UTC" firstStartedPulling="2026-01-21 15:44:21.807620593 +0000 UTC m=+1203.342734482" lastFinishedPulling="2026-01-21 15:44:45.523855593 +0000 UTC m=+1227.058969492" observedRunningTime="2026-01-21 15:44:45.937007288 +0000 UTC m=+1227.472121177" watchObservedRunningTime="2026-01-21 15:44:45.940106841 +0000 UTC m=+1227.475220730"
Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.163122 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gcqqr"]
Jan 21 15:44:46 crc kubenswrapper[5021]: W0121 15:44:46.168878 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c60e68a_50d2_402d_a040_085c245b9836.slice/crio-4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3 WatchSource:0}: Error finding container 4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3: Status 404 returned error can't find the container with id 4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3
Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.196009 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 21 15:44:46 crc kubenswrapper[5021]: W0121 15:44:46.200711 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b81a4e0_f0d5_4e03_b00f_f77dad44be9c.slice/crio-5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d WatchSource:0}: Error finding container 5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d: Status 404 returned error can't find the container with id 5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d
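The pod_startup_latency_tracker entry above for placement-db-sync-fxqns is a good illustration of the fields involved: the pull window (firstStartedPulling 15:44:21 to lastFinishedPulling 15:44:45) accounts for most of the 25.9s end-to-end duration, which appears to be why podStartSLOduration is only about 2.2s once pull time is excluded. A sketch that extracts these fields from such lines; the field names come from the log, the parsing is my own and handles both the quoted and the bare values:

```python
import re

FIELDS = [
    "podStartSLOduration", "podStartE2EDuration", "podCreationTimestamp",
    "firstStartedPulling", "lastFinishedPulling", "observedRunningTime",
]

def startup_record(line):
    """Fields from one 'Observed pod startup duration' entry, or None."""
    if "Observed pod startup duration" not in line:
        return None
    pod = re.search(r'pod="([^"]+)"', line)
    rec = {"pod": pod.group(1) if pod else None}
    for field in FIELDS:
        # Quoted values first (timestamps, E2E duration), then bare ones
        # (podStartSLOduration is logged without quotes).
        m = (re.search(rf'{field}="([^"]*)"', line)
             or re.search(rf'{field}=(\S+)', line))
        rec[field] = m.group(1) if m else None
    return rec
```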
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b81a4e0_f0d5_4e03_b00f_f77dad44be9c.slice/crio-5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d WatchSource:0}: Error finding container 5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d: Status 404 returned error can't find the container with id 5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.287434 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.924184 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gcqqr" event={"ID":"7c60e68a-50d2-402d-a040-085c245b9836","Type":"ContainerStarted","Data":"f185e33f65cebb8b90363af17b53d1053a78bb996999011fdf8dc5805f5075e4"} Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.924487 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gcqqr" event={"ID":"7c60e68a-50d2-402d-a040-085c245b9836","Type":"ContainerStarted","Data":"4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3"} Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.927780 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerStarted","Data":"430a5f5e2132c63531aa5e0f7b4202e0c48b4cda0f203ee49ff436a3a55736a3"} Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.930597 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerStarted","Data":"3b77e5e1aadd0eec3081d6d317d598f851b02c9638caed1693bb7b2641fb0c29"} Jan 21 15:44:46 crc kubenswrapper[5021]: I0121 15:44:46.930738 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerStarted","Data":"5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d"} Jan 21 15:44:47 crc kubenswrapper[5021]: I0121 15:44:47.945206 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerStarted","Data":"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453"} Jan 21 15:44:48 crc kubenswrapper[5021]: I0121 15:44:48.774934 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gcqqr" podStartSLOduration=11.774900009 podStartE2EDuration="11.774900009s" podCreationTimestamp="2026-01-21 15:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:46.945399403 +0000 UTC m=+1228.480513292" watchObservedRunningTime="2026-01-21 15:44:48.774900009 +0000 UTC m=+1230.310013898" Jan 21 15:44:50 crc kubenswrapper[5021]: I0121 15:44:50.991103 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerStarted","Data":"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333"} Jan 21 15:44:52 crc kubenswrapper[5021]: I0121 15:44:52.002415 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerStarted","Data":"06785eb5d86910068797d6bce294bb6783c169d8956563f0bc01666a1d00f6fd"} Jan 21 15:44:52 crc kubenswrapper[5021]: I0121 15:44:52.004986 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerStarted","Data":"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d"} Jan 21 15:44:56 crc kubenswrapper[5021]: I0121 15:44:56.088418 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=24.088394003 podStartE2EDuration="24.088394003s" podCreationTimestamp="2026-01-21 15:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:56.080333406 +0000 UTC m=+1237.615447295" watchObservedRunningTime="2026-01-21 15:44:56.088394003 +0000 UTC m=+1237.623507892" Jan 21 15:44:56 crc kubenswrapper[5021]: I0121 15:44:56.109783 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=24.109766357 podStartE2EDuration="24.109766357s" podCreationTimestamp="2026-01-21 15:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:44:56.108812821 +0000 UTC m=+1237.643926710" watchObservedRunningTime="2026-01-21 15:44:56.109766357 +0000 UTC m=+1237.644880236" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.158650 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz"] Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.161609 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.165063 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.165109 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.170291 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz"] Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.229020 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.229121 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.229267 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxqdl\" (UniqueName: \"kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.330709 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxqdl\" (UniqueName: \"kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.330864 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.330952 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.332099 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume\") pod 
\"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.338669 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.350514 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxqdl\" (UniqueName: \"kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl\") pod \"collect-profiles-29483505-n6gnz\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:00 crc kubenswrapper[5021]: I0121 15:45:00.488329 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.257207 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.258267 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.258282 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.258292 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.270970 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.271074 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.271096 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.271113 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.297823 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.309269 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.309506 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.319897 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 21 15:45:03 crc kubenswrapper[5021]: I0121 15:45:03.424904 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz"] Jan 21 15:45:03 crc kubenswrapper[5021]: W0121 15:45:03.428030 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b3bf6b3_6efd_4f62_abec_5c886c910ed4.slice/crio-b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c WatchSource:0}: Error finding container b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c: Status 404 returned error can't find the container with id b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c Jan 21 15:45:04 crc kubenswrapper[5021]: I0121 15:45:04.117411 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" event={"ID":"5b3bf6b3-6efd-4f62-abec-5c886c910ed4","Type":"ContainerStarted","Data":"b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c"} Jan 21 15:45:05 crc kubenswrapper[5021]: I0121 15:45:05.132874 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" event={"ID":"5b3bf6b3-6efd-4f62-abec-5c886c910ed4","Type":"ContainerStarted","Data":"3e713eb40693d4274935a35169f18d43aa9df5f5c88681c4fe83e314963489e5"} Jan 21 15:45:05 crc kubenswrapper[5021]: I0121 15:45:05.156018 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" podStartSLOduration=5.155994545 podStartE2EDuration="5.155994545s" podCreationTimestamp="2026-01-21 15:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:05.151885684 +0000 UTC m=+1246.686999583" watchObservedRunningTime="2026-01-21 15:45:05.155994545 +0000 UTC m=+1246.691108434" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.205534 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.205995 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.759502 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.921946 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.922327 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:45:06 crc kubenswrapper[5021]: I0121 15:45:06.926665 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 21 15:45:07 crc kubenswrapper[5021]: I0121 15:45:07.187020 5021 generic.go:334] "Generic (PLEG): container finished" podID="5b3bf6b3-6efd-4f62-abec-5c886c910ed4" containerID="3e713eb40693d4274935a35169f18d43aa9df5f5c88681c4fe83e314963489e5" exitCode=0 Jan 21 15:45:07 crc kubenswrapper[5021]: I0121 15:45:07.187884 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" event={"ID":"5b3bf6b3-6efd-4f62-abec-5c886c910ed4","Type":"ContainerDied","Data":"3e713eb40693d4274935a35169f18d43aa9df5f5c88681c4fe83e314963489e5"} Jan 21 15:45:09 crc 
kubenswrapper[5021]: I0121 15:45:09.209584 5021 generic.go:334] "Generic (PLEG): container finished" podID="7c60e68a-50d2-402d-a040-085c245b9836" containerID="f185e33f65cebb8b90363af17b53d1053a78bb996999011fdf8dc5805f5075e4" exitCode=0 Jan 21 15:45:09 crc kubenswrapper[5021]: I0121 15:45:09.209772 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gcqqr" event={"ID":"7c60e68a-50d2-402d-a040-085c245b9836","Type":"ContainerDied","Data":"f185e33f65cebb8b90363af17b53d1053a78bb996999011fdf8dc5805f5075e4"} Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.057361 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.122443 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxqdl\" (UniqueName: \"kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl\") pod \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.122840 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume\") pod \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.122961 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume\") pod \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\" (UID: \"5b3bf6b3-6efd-4f62-abec-5c886c910ed4\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.123700 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume" (OuterVolumeSpecName: "config-volume") pod "5b3bf6b3-6efd-4f62-abec-5c886c910ed4" (UID: "5b3bf6b3-6efd-4f62-abec-5c886c910ed4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.128135 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl" (OuterVolumeSpecName: "kube-api-access-rxqdl") pod "5b3bf6b3-6efd-4f62-abec-5c886c910ed4" (UID: "5b3bf6b3-6efd-4f62-abec-5c886c910ed4"). InnerVolumeSpecName "kube-api-access-rxqdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.134499 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5b3bf6b3-6efd-4f62-abec-5c886c910ed4" (UID: "5b3bf6b3-6efd-4f62-abec-5c886c910ed4"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.219597 5021 generic.go:334] "Generic (PLEG): container finished" podID="6b6d6a49-2772-4d64-a665-618dfc7e2035" containerID="120163cff46fcf4d06479ef6e02fb2122698e67e2d06d3253bcc4438b94f5573" exitCode=0 Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.219692 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fxqns" event={"ID":"6b6d6a49-2772-4d64-a665-618dfc7e2035","Type":"ContainerDied","Data":"120163cff46fcf4d06479ef6e02fb2122698e67e2d06d3253bcc4438b94f5573"} Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.222920 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.222949 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz" event={"ID":"5b3bf6b3-6efd-4f62-abec-5c886c910ed4","Type":"ContainerDied","Data":"b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c"} Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.222989 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b67e16e0fd551ffd17d179cf002804abf5b588eaff3dec9615847228e9ea987c" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.224430 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxqdl\" (UniqueName: \"kubernetes.io/projected/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-kube-api-access-rxqdl\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.224453 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.224467 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5b3bf6b3-6efd-4f62-abec-5c886c910ed4-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.479185 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.629687 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.630209 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.630312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.630353 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.630379 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.630422 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpg5q\" (UniqueName: \"kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q\") pod \"7c60e68a-50d2-402d-a040-085c245b9836\" (UID: \"7c60e68a-50d2-402d-a040-085c245b9836\") " Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.635315 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q" (OuterVolumeSpecName: "kube-api-access-wpg5q") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "kube-api-access-wpg5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.635786 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.636152 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts" (OuterVolumeSpecName: "scripts") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.636416 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.664269 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data" (OuterVolumeSpecName: "config-data") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.664312 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c60e68a-50d2-402d-a040-085c245b9836" (UID: "7c60e68a-50d2-402d-a040-085c245b9836"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732601 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732650 5021 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732664 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732677 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpg5q\" (UniqueName: \"kubernetes.io/projected/7c60e68a-50d2-402d-a040-085c245b9836-kube-api-access-wpg5q\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732691 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:10 crc kubenswrapper[5021]: I0121 15:45:10.732706 5021 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c60e68a-50d2-402d-a040-085c245b9836-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.230851 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x6jrz" event={"ID":"52f108a9-a567-4074-88db-05c8c2feea41","Type":"ContainerStarted","Data":"5e4bbcdf00bc1e50c5704613a05d600796fcf5cc609a68ac31315e5938f7dcfc"} Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.233159 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-995xv" 
event={"ID":"e369fc7c-044b-47cc-964f-601d7c06f150","Type":"ContainerStarted","Data":"51eec0552e08bfb2c091d10faebfb1b194d09fd936b7af82ef50e2debeffa6c1"} Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.236244 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gcqqr" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.237035 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gcqqr" event={"ID":"7c60e68a-50d2-402d-a040-085c245b9836","Type":"ContainerDied","Data":"4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3"} Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.237120 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c781c17d04726cb8ed009483fe120b10c817755946a5964136924e07a98fab3" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.254443 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-x6jrz" podStartSLOduration=5.042457923 podStartE2EDuration="51.254423637s" podCreationTimestamp="2026-01-21 15:44:20 +0000 UTC" firstStartedPulling="2026-01-21 15:44:21.720404708 +0000 UTC m=+1203.255518597" lastFinishedPulling="2026-01-21 15:45:07.932370422 +0000 UTC m=+1249.467484311" observedRunningTime="2026-01-21 15:45:11.24781241 +0000 UTC m=+1252.782926339" watchObservedRunningTime="2026-01-21 15:45:11.254423637 +0000 UTC m=+1252.789537526" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.375734 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:45:11 crc kubenswrapper[5021]: E0121 15:45:11.376378 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c60e68a-50d2-402d-a040-085c245b9836" containerName="keystone-bootstrap" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.376396 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c60e68a-50d2-402d-a040-085c245b9836" containerName="keystone-bootstrap" Jan 21 15:45:11 crc kubenswrapper[5021]: E0121 15:45:11.376422 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b3bf6b3-6efd-4f62-abec-5c886c910ed4" containerName="collect-profiles" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.376428 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b3bf6b3-6efd-4f62-abec-5c886c910ed4" containerName="collect-profiles" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.376576 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b3bf6b3-6efd-4f62-abec-5c886c910ed4" containerName="collect-profiles" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.376590 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c60e68a-50d2-402d-a040-085c245b9836" containerName="keystone-bootstrap" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.384602 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.388501 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.388723 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.388877 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8kl7s" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.389580 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.389728 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.391551 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.391963 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443389 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443460 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443488 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443523 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443740 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443885 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: 
\"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443944 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.443995 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gls6\" (UniqueName: \"kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545654 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545709 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545745 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545828 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545867 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545892 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.545946 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gls6\" (UniqueName: \"kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" 
Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.546000 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.555347 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.556946 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.557177 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.557199 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.564197 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.571879 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.572670 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.583042 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gls6\" (UniqueName: \"kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6\") pod \"keystone-548db5cc6d-pjhdh\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.704262 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.798482 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fxqns" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.861553 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle\") pod \"6b6d6a49-2772-4d64-a665-618dfc7e2035\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.862274 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data\") pod \"6b6d6a49-2772-4d64-a665-618dfc7e2035\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.862463 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts\") pod \"6b6d6a49-2772-4d64-a665-618dfc7e2035\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.862517 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gktpv\" (UniqueName: \"kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv\") pod \"6b6d6a49-2772-4d64-a665-618dfc7e2035\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.862554 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs\") pod \"6b6d6a49-2772-4d64-a665-618dfc7e2035\" (UID: \"6b6d6a49-2772-4d64-a665-618dfc7e2035\") " Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.863537 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs" (OuterVolumeSpecName: "logs") pod "6b6d6a49-2772-4d64-a665-618dfc7e2035" (UID: "6b6d6a49-2772-4d64-a665-618dfc7e2035"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.866903 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv" (OuterVolumeSpecName: "kube-api-access-gktpv") pod "6b6d6a49-2772-4d64-a665-618dfc7e2035" (UID: "6b6d6a49-2772-4d64-a665-618dfc7e2035"). InnerVolumeSpecName "kube-api-access-gktpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.879936 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts" (OuterVolumeSpecName: "scripts") pod "6b6d6a49-2772-4d64-a665-618dfc7e2035" (UID: "6b6d6a49-2772-4d64-a665-618dfc7e2035"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.889806 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data" (OuterVolumeSpecName: "config-data") pod "6b6d6a49-2772-4d64-a665-618dfc7e2035" (UID: "6b6d6a49-2772-4d64-a665-618dfc7e2035"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.891938 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b6d6a49-2772-4d64-a665-618dfc7e2035" (UID: "6b6d6a49-2772-4d64-a665-618dfc7e2035"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.964685 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.965601 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.965674 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gktpv\" (UniqueName: \"kubernetes.io/projected/6b6d6a49-2772-4d64-a665-618dfc7e2035-kube-api-access-gktpv\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.965709 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b6d6a49-2772-4d64-a665-618dfc7e2035-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.965750 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: I0121 15:45:11.965764 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b6d6a49-2772-4d64-a665-618dfc7e2035-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:11 crc kubenswrapper[5021]: W0121 15:45:11.968441 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4d4c24c_c623_4b7a_92e2_151d132cdebf.slice/crio-d942e6a66a7e445a8aac03e15b7fda374a733e183576c824add7a5f9ebbe357d WatchSource:0}: Error finding container d942e6a66a7e445a8aac03e15b7fda374a733e183576c824add7a5f9ebbe357d: Status 404 returned error can't find the container with id d942e6a66a7e445a8aac03e15b7fda374a733e183576c824add7a5f9ebbe357d Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.245310 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-fxqns" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.245298 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fxqns" event={"ID":"6b6d6a49-2772-4d64-a665-618dfc7e2035","Type":"ContainerDied","Data":"33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5"} Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.245894 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33aba1c5d555e73151019244279a7016b48265a078363c9435d0922ff40a5ad5" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.246701 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548db5cc6d-pjhdh" event={"ID":"b4d4c24c-c623-4b7a-92e2-151d132cdebf","Type":"ContainerStarted","Data":"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72"} Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.246734 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548db5cc6d-pjhdh" event={"ID":"b4d4c24c-c623-4b7a-92e2-151d132cdebf","Type":"ContainerStarted","Data":"d942e6a66a7e445a8aac03e15b7fda374a733e183576c824add7a5f9ebbe357d"} Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.247840 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.251207 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerStarted","Data":"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558"} Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.271119 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-548db5cc6d-pjhdh" podStartSLOduration=1.271101045 podStartE2EDuration="1.271101045s" podCreationTimestamp="2026-01-21 15:45:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:12.266542172 +0000 UTC m=+1253.801656061" watchObservedRunningTime="2026-01-21 15:45:12.271101045 +0000 UTC m=+1253.806214934" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.296080 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-995xv" podStartSLOduration=6.085813099 podStartE2EDuration="52.296059316s" podCreationTimestamp="2026-01-21 15:44:20 +0000 UTC" firstStartedPulling="2026-01-21 15:44:21.719253878 +0000 UTC m=+1203.254367767" lastFinishedPulling="2026-01-21 15:45:07.929500095 +0000 UTC m=+1249.464613984" observedRunningTime="2026-01-21 15:45:12.284926236 +0000 UTC m=+1253.820040125" watchObservedRunningTime="2026-01-21 15:45:12.296059316 +0000 UTC m=+1253.831173205" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.425764 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:45:12 crc kubenswrapper[5021]: E0121 15:45:12.426151 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b6d6a49-2772-4d64-a665-618dfc7e2035" containerName="placement-db-sync" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.426169 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b6d6a49-2772-4d64-a665-618dfc7e2035" containerName="placement-db-sync" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.426357 5021 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="6b6d6a49-2772-4d64-a665-618dfc7e2035" containerName="placement-db-sync" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.427322 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.430598 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.430781 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.431173 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.431324 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-krlrd" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.431450 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.455014 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.472635 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.472788 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.472828 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.472857 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.472938 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjf57\" (UniqueName: \"kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.473009 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.473059 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.574966 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575341 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575374 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575415 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575439 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575505 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575556 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.575653 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjf57\" (UniqueName: \"kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57\") pod \"placement-57f8ddbc76-dgfjh\" (UID: 
\"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.585514 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.586259 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.586613 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.591246 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.593889 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.603026 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjf57\" (UniqueName: \"kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57\") pod \"placement-57f8ddbc76-dgfjh\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:12 crc kubenswrapper[5021]: I0121 15:45:12.745021 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:13 crc kubenswrapper[5021]: I0121 15:45:13.226016 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:45:13 crc kubenswrapper[5021]: W0121 15:45:13.234182 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod469c5416_c102_43c5_8801_502231a86238.slice/crio-3bcbb563925e2a4544eefd2b4d1e327be52343aa34777acfdbac76cacde6fc5f WatchSource:0}: Error finding container 3bcbb563925e2a4544eefd2b4d1e327be52343aa34777acfdbac76cacde6fc5f: Status 404 returned error can't find the container with id 3bcbb563925e2a4544eefd2b4d1e327be52343aa34777acfdbac76cacde6fc5f Jan 21 15:45:13 crc kubenswrapper[5021]: I0121 15:45:13.317809 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerStarted","Data":"3bcbb563925e2a4544eefd2b4d1e327be52343aa34777acfdbac76cacde6fc5f"} Jan 21 15:45:14 crc kubenswrapper[5021]: I0121 15:45:14.331804 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerStarted","Data":"568335520c5abb99b1d9dd2a7aa68f565adae7b72f51cf91144f9ac64fbbdece"} Jan 21 15:45:15 crc kubenswrapper[5021]: I0121 15:45:15.342156 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerStarted","Data":"ed0b25896af93d99f78d4d4db9ef15750f9683c1f7556443210e10912e9c3954"} Jan 21 15:45:15 crc kubenswrapper[5021]: I0121 15:45:15.342570 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:15 crc kubenswrapper[5021]: I0121 15:45:15.342653 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:15 crc kubenswrapper[5021]: I0121 15:45:15.372253 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-57f8ddbc76-dgfjh" podStartSLOduration=3.3722274309999998 podStartE2EDuration="3.372227431s" podCreationTimestamp="2026-01-21 15:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:15.361239856 +0000 UTC m=+1256.896353745" watchObservedRunningTime="2026-01-21 15:45:15.372227431 +0000 UTC m=+1256.907341370" Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.423785 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerStarted","Data":"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c"} Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.424846 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.424632 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="sg-core" containerID="cri-o://2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558" gracePeriod=30 Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.423995 5021 kuberuntime_container.go:808] "Killing container 
Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.424607 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="proxy-httpd" containerID="cri-o://b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c" gracePeriod=30
Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.424652 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-notification-agent" containerID="cri-o://adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333" gracePeriod=30
Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.428796 5021 generic.go:334] "Generic (PLEG): container finished" podID="52f108a9-a567-4074-88db-05c8c2feea41" containerID="5e4bbcdf00bc1e50c5704613a05d600796fcf5cc609a68ac31315e5938f7dcfc" exitCode=0
Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.428847 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x6jrz" event={"ID":"52f108a9-a567-4074-88db-05c8c2feea41","Type":"ContainerDied","Data":"5e4bbcdf00bc1e50c5704613a05d600796fcf5cc609a68ac31315e5938f7dcfc"}
Jan 21 15:45:21 crc kubenswrapper[5021]: I0121 15:45:21.453543 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.023450314 podStartE2EDuration="1m2.453527684s" podCreationTimestamp="2026-01-21 15:44:19 +0000 UTC" firstStartedPulling="2026-01-21 15:44:21.378662482 +0000 UTC m=+1202.913776371" lastFinishedPulling="2026-01-21 15:45:20.808739852 +0000 UTC m=+1262.343853741" observedRunningTime="2026-01-21 15:45:21.452092096 +0000 UTC m=+1262.987205985" watchObservedRunningTime="2026-01-21 15:45:21.453527684 +0000 UTC m=+1262.988641573"
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445115 5021 generic.go:334] "Generic (PLEG): container finished" podID="30607c41-8b77-404d-80e2-905a915c8697" containerID="b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c" exitCode=0
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445147 5021 generic.go:334] "Generic (PLEG): container finished" podID="30607c41-8b77-404d-80e2-905a915c8697" containerID="2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558" exitCode=2
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445155 5021 generic.go:334] "Generic (PLEG): container finished" podID="30607c41-8b77-404d-80e2-905a915c8697" containerID="fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8" exitCode=0
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445337 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerDied","Data":"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c"}
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445363 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerDied","Data":"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558"}
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.445373 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerDied","Data":"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8"}
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.815038 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.823517 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x6jrz"
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866559 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866639 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866681 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data\") pod \"52f108a9-a567-4074-88db-05c8c2feea41\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866725 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866773 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866899 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866944 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle\") pod \"52f108a9-a567-4074-88db-05c8c2feea41\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.866978 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.867002 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44t5h\" (UniqueName: \"kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h\") pod \"30607c41-8b77-404d-80e2-905a915c8697\" (UID: \"30607c41-8b77-404d-80e2-905a915c8697\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.867077 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zghrd\" (UniqueName: \"kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd\") pod \"52f108a9-a567-4074-88db-05c8c2feea41\" (UID: \"52f108a9-a567-4074-88db-05c8c2feea41\") "
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.868579 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.879094 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.880236 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts" (OuterVolumeSpecName: "scripts") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.881133 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "52f108a9-a567-4074-88db-05c8c2feea41" (UID: "52f108a9-a567-4074-88db-05c8c2feea41"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.895287 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd" (OuterVolumeSpecName: "kube-api-access-zghrd") pod "52f108a9-a567-4074-88db-05c8c2feea41" (UID: "52f108a9-a567-4074-88db-05c8c2feea41"). InnerVolumeSpecName "kube-api-access-zghrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.895380 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h" (OuterVolumeSpecName: "kube-api-access-44t5h") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "kube-api-access-44t5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.908243 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.908427 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52f108a9-a567-4074-88db-05c8c2feea41" (UID: "52f108a9-a567-4074-88db-05c8c2feea41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.956202 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.960833 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data" (OuterVolumeSpecName: "config-data") pod "30607c41-8b77-404d-80e2-905a915c8697" (UID: "30607c41-8b77-404d-80e2-905a915c8697"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.968942 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.968979 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.968990 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.968998 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44t5h\" (UniqueName: \"kubernetes.io/projected/30607c41-8b77-404d-80e2-905a915c8697-kube-api-access-44t5h\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969008 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zghrd\" (UniqueName: \"kubernetes.io/projected/52f108a9-a567-4074-88db-05c8c2feea41-kube-api-access-zghrd\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969016 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30607c41-8b77-404d-80e2-905a915c8697-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969024 5021 reconciler_common.go:293] "Volume 
detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969032 5021 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/52f108a9-a567-4074-88db-05c8c2feea41-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969040 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:22 crc kubenswrapper[5021]: I0121 15:45:22.969047 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30607c41-8b77-404d-80e2-905a915c8697-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.455663 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x6jrz" event={"ID":"52f108a9-a567-4074-88db-05c8c2feea41","Type":"ContainerDied","Data":"cb1a8f1cbbd9151698bb92dbab98cb8004523fcd3ebf38b406475d5452855d34"} Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.456987 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb1a8f1cbbd9151698bb92dbab98cb8004523fcd3ebf38b406475d5452855d34" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.455673 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x6jrz" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.459569 5021 generic.go:334] "Generic (PLEG): container finished" podID="30607c41-8b77-404d-80e2-905a915c8697" containerID="adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333" exitCode=0 Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.459594 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerDied","Data":"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333"} Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.459625 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30607c41-8b77-404d-80e2-905a915c8697","Type":"ContainerDied","Data":"b91a2c7ec6e85ea619e3e07e8a7d69a7b6ee1f11f457ae5355df2a8fd867f6c7"} Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.459645 5021 scope.go:117] "RemoveContainer" containerID="b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.460105 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.485315 5021 scope.go:117] "RemoveContainer" containerID="2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.506974 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.527937 5021 scope.go:117] "RemoveContainer" containerID="adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.528862 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.558555 5021 scope.go:117] "RemoveContainer" containerID="fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.570154 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.570858 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f108a9-a567-4074-88db-05c8c2feea41" containerName="barbican-db-sync" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.570880 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f108a9-a567-4074-88db-05c8c2feea41" containerName="barbican-db-sync" Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.570897 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="proxy-httpd" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.570937 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="proxy-httpd" Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.570972 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="sg-core" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.570982 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="sg-core" Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.571023 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-central-agent" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571034 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-central-agent" Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.571048 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-notification-agent" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571056 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-notification-agent" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571423 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f108a9-a567-4074-88db-05c8c2feea41" containerName="barbican-db-sync" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571445 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="proxy-httpd" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571464 5021 memory_manager.go:354] "RemoveStaleState 
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571515 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-notification-agent"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.571530 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="30607c41-8b77-404d-80e2-905a915c8697" containerName="ceilometer-central-agent"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.573983 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.577717 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.578163 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.578327 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.602457 5021 scope.go:117] "RemoveContainer" containerID="b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c"
Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.605837 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c\": container with ID starting with b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c not found: ID does not exist" containerID="b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.605887 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c"} err="failed to get container status \"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c\": rpc error: code = NotFound desc = could not find container \"b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c\": container with ID starting with b4532749f6c3d297c75eb40a4c509280df337ca9271fbcf1bb7fc9e57ffda08c not found: ID does not exist"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.605933 5021 scope.go:117] "RemoveContainer" containerID="2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558"
Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.613534 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558\": container with ID starting with 2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558 not found: ID does not exist" containerID="2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.613580 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558"} err="failed to get container status \"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558\": rpc error: code = NotFound desc = could not find container \"2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558\": container with ID starting with 2dab79e969b70484bf0ea5fec8caa787a03fd511bc8678f58d98c3f48b858558 not found: ID does not exist"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.613611 5021 scope.go:117] "RemoveContainer" containerID="adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333"
Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.617553 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333\": container with ID starting with adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333 not found: ID does not exist" containerID="adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.617618 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333"} err="failed to get container status \"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333\": rpc error: code = NotFound desc = could not find container \"adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333\": container with ID starting with adf115904816307cc743c22da68501cc00b6027bea96665629352fc6b7f93333 not found: ID does not exist"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.617671 5021 scope.go:117] "RemoveContainer" containerID="fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8"
Jan 21 15:45:23 crc kubenswrapper[5021]: E0121 15:45:23.618166 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8\": container with ID starting with fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8 not found: ID does not exist" containerID="fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.618200 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8"} err="failed to get container status \"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8\": rpc error: code = NotFound desc = could not find container \"fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8\": container with ID starting with fdf6b6553f1a35d3ce7348af4b5e289d5a0ccfc8c6fc8bbbbee632cc25491ce8 not found: ID does not exist"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.684753 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.684821 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.684850 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.684873 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh54c\" (UniqueName: \"kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.684938 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.685021 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.685053 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.768642 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"]
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.772514 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.774295 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-dzpf7"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.774597 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.779923 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787115 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787182 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787211 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh54c\" (UniqueName: \"kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787234 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787279 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787366 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787405 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.787594 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"]
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.788398 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.788727 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.790541 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.793668 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.795867 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.797404 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.797866 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.799930 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.803415 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"]
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.812030 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"]
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.825784 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh54c\" (UniqueName: \"kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c\") pod \"ceilometer-0\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " pod="openstack/ceilometer-0"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.888826 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889070 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889159 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889258 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrsnl\" (UniqueName: \"kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889324 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889408 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889475 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtmt8\" (UniqueName: \"kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889544 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889622 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.889706 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2"
pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.916870 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"] Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.917658 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:45:23 crc kubenswrapper[5021]: I0121 15:45:23.918540 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.002002 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.002641 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.002734 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.002791 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.002958 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003039 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003084 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rc7tk\" (UniqueName: \"kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003259 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003334 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrsnl\" (UniqueName: \"kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003572 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003774 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.003824 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtmt8\" (UniqueName: \"kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.004120 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.004342 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.004364 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.004585 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 
15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.027594 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.035517 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.039611 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.050846 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.063574 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113621 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rc7tk\" (UniqueName: \"kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113739 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113764 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113827 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113887 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.113981 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.116583 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.116627 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtmt8\" (UniqueName: \"kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.117209 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.119616 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.131263 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.134396 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.134492 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data\") pod \"barbican-keystone-listener-76bc56d748-8glcs\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.135249 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.135853 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrsnl\" (UniqueName: \"kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl\") pod \"barbican-worker-7bc8f89b55-8c6t2\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.139902 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.141796 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.173662 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rc7tk\" (UniqueName: \"kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk\") pod \"dnsmasq-dns-6d66f584d7-m2bc9\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") " pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.199810 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.234044 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.238067 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.238563 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.240053 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.254796 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.256208 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.337730 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.337891 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.337947 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.337964 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgsbg\" (UniqueName: \"kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.337989 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.439198 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.439264 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgsbg\" (UniqueName: \"kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.439295 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom\") pod \"barbican-api-bb66bbffd-268lb\" (UID: 
\"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.439330 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.439430 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.440346 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.445194 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.446333 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.447565 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.463212 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgsbg\" (UniqueName: \"kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg\") pod \"barbican-api-bb66bbffd-268lb\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.602311 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.685507 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.749743 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30607c41-8b77-404d-80e2-905a915c8697" path="/var/lib/kubelet/pods/30607c41-8b77-404d-80e2-905a915c8697/volumes" Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.828060 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.884567 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"] Jan 21 15:45:24 crc kubenswrapper[5021]: I0121 15:45:24.894073 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"] Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.083410 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"] Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.489281 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerStarted","Data":"67819029e1afeeda9cf789c13df258a83255bf3a2c486502ea66c51dcd815078"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.494023 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerStarted","Data":"4db685e2ebf7830711dda7c3e4c0a750beef34f9db60d96ca446d206370a200e"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.494071 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerStarted","Data":"7619a0df4c4385ef9b6a2f7b158d72fbd2c98ff0173501c9eba1a9265316a8c9"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.502083 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerStarted","Data":"a0037c2b90a36c6f015cf859d28c5ddce8552bc8d7ef42b6ef27223bf6026f8b"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.502152 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerStarted","Data":"fd86e1cdc4e18ba2aab0779d8ce6261b2bc164bc56eae7c821f65fd9614f49ce"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.504474 5021 generic.go:334] "Generic (PLEG): container finished" podID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerID="59cd94a631ab3d21db1b67111f84c0246f6443bd659d226a857d5740724e2557" exitCode=0 Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.504583 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" event={"ID":"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64","Type":"ContainerDied","Data":"59cd94a631ab3d21db1b67111f84c0246f6443bd659d226a857d5740724e2557"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.504614 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" 
event={"ID":"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64","Type":"ContainerStarted","Data":"3a0986ec8ff5dd9fe73b8d70914d1af47ac3e25f73a6b979fcdb238116524057"} Jan 21 15:45:25 crc kubenswrapper[5021]: I0121 15:45:25.507959 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerStarted","Data":"3a228a68dd38adae7a3b53ceceb41c5424fa365376dd59acb9682e9b94206d80"} Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.522561 5021 generic.go:334] "Generic (PLEG): container finished" podID="e369fc7c-044b-47cc-964f-601d7c06f150" containerID="51eec0552e08bfb2c091d10faebfb1b194d09fd936b7af82ef50e2debeffa6c1" exitCode=0 Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.522889 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-995xv" event={"ID":"e369fc7c-044b-47cc-964f-601d7c06f150","Type":"ContainerDied","Data":"51eec0552e08bfb2c091d10faebfb1b194d09fd936b7af82ef50e2debeffa6c1"} Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.528727 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" event={"ID":"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64","Type":"ContainerStarted","Data":"4a34d9f6ec66f2e4d1c220d85ef44f38360950463cef6a69c179f9757ff5c4f8"} Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.529090 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.532350 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerStarted","Data":"835ae94f46e8d3a10de61c425c18bc14595bed85ddf2222528e93073ee8ed02d"} Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.532712 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.532877 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.570313 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" podStartSLOduration=3.57029321 podStartE2EDuration="3.57029321s" podCreationTimestamp="2026-01-21 15:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:26.563307982 +0000 UTC m=+1268.098421871" watchObservedRunningTime="2026-01-21 15:45:26.57029321 +0000 UTC m=+1268.105407099" Jan 21 15:45:26 crc kubenswrapper[5021]: I0121 15:45:26.584172 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-bb66bbffd-268lb" podStartSLOduration=2.5841489429999998 podStartE2EDuration="2.584148943s" podCreationTimestamp="2026-01-21 15:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:26.578744057 +0000 UTC m=+1268.113857946" watchObservedRunningTime="2026-01-21 15:45:26.584148943 +0000 UTC m=+1268.119262832" Jan 21 15:45:27 crc kubenswrapper[5021]: I0121 15:45:27.544400 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerStarted","Data":"8c5d90a6ad824fea3ab5ae2edf9cc8362f2364eddb0ab757e7d2232bc9e81433"} Jan 21 15:45:27 crc kubenswrapper[5021]: I0121 15:45:27.556241 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerStarted","Data":"233265452bf90deac8f8558f1e900daf7f05dda82bc41b72a507e02c47ad409b"} Jan 21 15:45:27 crc kubenswrapper[5021]: I0121 15:45:27.567233 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerStarted","Data":"fcc7d2e930abd6b478f82fcfe23ce92e362592301c9c8fb4f5dea9d2b2bedb88"} Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.166902 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-995xv" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.233651 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.233807 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.233870 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.233960 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql7wj\" (UniqueName: \"kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.234264 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.234317 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id\") pod \"e369fc7c-044b-47cc-964f-601d7c06f150\" (UID: \"e369fc7c-044b-47cc-964f-601d7c06f150\") " Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.235368 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.236470 5021 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e369fc7c-044b-47cc-964f-601d7c06f150-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.239625 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.239700 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj" (OuterVolumeSpecName: "kube-api-access-ql7wj") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "kube-api-access-ql7wj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.240139 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts" (OuterVolumeSpecName: "scripts") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.263180 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.305119 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data" (OuterVolumeSpecName: "config-data") pod "e369fc7c-044b-47cc-964f-601d7c06f150" (UID: "e369fc7c-044b-47cc-964f-601d7c06f150"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.338335 5021 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.338375 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.338387 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.338395 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e369fc7c-044b-47cc-964f-601d7c06f150-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.338404 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql7wj\" (UniqueName: \"kubernetes.io/projected/e369fc7c-044b-47cc-964f-601d7c06f150-kube-api-access-ql7wj\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.494721 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:45:28 crc kubenswrapper[5021]: E0121 15:45:28.495253 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e369fc7c-044b-47cc-964f-601d7c06f150" containerName="cinder-db-sync" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.495271 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e369fc7c-044b-47cc-964f-601d7c06f150" containerName="cinder-db-sync" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.495490 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e369fc7c-044b-47cc-964f-601d7c06f150" containerName="cinder-db-sync" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.496523 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.499525 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.501095 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.522416 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.592269 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-995xv" event={"ID":"e369fc7c-044b-47cc-964f-601d7c06f150","Type":"ContainerDied","Data":"f993a4696d15902b0ea64e9d041d0d825a69de9e4e7ec0d11929ce1dac2b6193"} Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.592309 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f993a4696d15902b0ea64e9d041d0d825a69de9e4e7ec0d11929ce1dac2b6193" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.592370 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-995xv" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.643937 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644004 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644049 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br6k2\" (UniqueName: \"kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644114 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644200 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644283 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.644406 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746337 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746727 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746755 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746788 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br6k2\" (UniqueName: \"kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746833 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746899 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746937 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.746985 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.760633 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.769296 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.773053 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.775634 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.775732 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.776737 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.776996 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.782566 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.782761 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-gft5b" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.782887 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.783700 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.817111 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br6k2\" (UniqueName: \"kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2\") pod \"barbican-api-7b8886d4fd-qn9sz\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.826590 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860267 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860327 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860401 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860427 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z97vx\" (UniqueName: \"kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860475 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.860514 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.905351 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.969222 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"] Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.969443 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="dnsmasq-dns" containerID="cri-o://4a34d9f6ec66f2e4d1c220d85ef44f38360950463cef6a69c179f9757ff5c4f8" gracePeriod=10 Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971067 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971368 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971448 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971470 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z97vx\" (UniqueName: \"kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971535 5021 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.971569 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.973439 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.976782 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:28 crc kubenswrapper[5021]: I0121 15:45:28.984600 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:28.992404 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.004824 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"] Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.006267 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.006954 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z97vx\" (UniqueName: \"kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.021726 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts\") pod \"cinder-scheduler-0\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.034205 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"] Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.072753 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jscv7\" (UniqueName: \"kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.073129 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.073167 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.073190 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.073244 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.073310 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.136810 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 21 
15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.140640 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.146303 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.163226 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.175957 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.176026 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jscv7\" (UniqueName: \"kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.176135 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.176193 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.177711 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.177796 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.178460 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.185664 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 
crc kubenswrapper[5021]: I0121 15:45:29.186806 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.187287 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.187470 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.211443 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jscv7\" (UniqueName: \"kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7\") pod \"dnsmasq-dns-674b76c99f-vlmjb\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.246940 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.279826 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.279914 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.279952 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.280084 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.280149 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " 
pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.280187 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.280286 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2skkg\" (UniqueName: \"kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.337411 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.382861 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.383847 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.383882 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.383929 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.383978 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2skkg\" (UniqueName: \"kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.384022 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.384060 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.384146 5021 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.384472 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.392883 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.405417 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.406169 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.424528 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2skkg\" (UniqueName: \"kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.429630 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") " pod="openstack/cinder-api-0" Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.443895 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:45:29 crc kubenswrapper[5021]: W0121 15:45:29.453390 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfad66107_0589_4ed8_94dc_fd29f2f58c43.slice/crio-9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8 WatchSource:0}: Error finding container 9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8: Status 404 returned error can't find the container with id 9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8 Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.469346 5021 util.go:30] "No sandbox for pod can be found. 
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.443895 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"]
Jan 21 15:45:29 crc kubenswrapper[5021]: W0121 15:45:29.453390 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfad66107_0589_4ed8_94dc_fd29f2f58c43.slice/crio-9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8 WatchSource:0}: Error finding container 9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8: Status 404 returned error can't find the container with id 9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.469346 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.610268 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerStarted","Data":"9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8"}
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.613014 5021 generic.go:334] "Generic (PLEG): container finished" podID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerID="4a34d9f6ec66f2e4d1c220d85ef44f38360950463cef6a69c179f9757ff5c4f8" exitCode=0
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.613085 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" event={"ID":"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64","Type":"ContainerDied","Data":"4a34d9f6ec66f2e4d1c220d85ef44f38360950463cef6a69c179f9757ff5c4f8"}
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.620703 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerStarted","Data":"7673fa928a9d34d9093a07ad100e4f08c6bac6b0eb9a73dd956508ac3f6d49ca"}
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.633837 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerStarted","Data":"e1e5425a7a11ce797c6d259dce5739f59a0a9337b9e52538e7943644bd38dc3e"}
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.637845 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" podStartSLOduration=4.7197282860000005 podStartE2EDuration="6.637826014s" podCreationTimestamp="2026-01-21 15:45:23 +0000 UTC" firstStartedPulling="2026-01-21 15:45:24.824699929 +0000 UTC m=+1266.359813818" lastFinishedPulling="2026-01-21 15:45:26.742797657 +0000 UTC m=+1268.277911546" observedRunningTime="2026-01-21 15:45:29.636876448 +0000 UTC m=+1271.171990337" watchObservedRunningTime="2026-01-21 15:45:29.637826014 +0000 UTC m=+1271.172939923"
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.668373 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" podStartSLOduration=4.807189579 podStartE2EDuration="6.668355305s" podCreationTimestamp="2026-01-21 15:45:23 +0000 UTC" firstStartedPulling="2026-01-21 15:45:24.887969311 +0000 UTC m=+1266.423083200" lastFinishedPulling="2026-01-21 15:45:26.749135037 +0000 UTC m=+1268.284248926" observedRunningTime="2026-01-21 15:45:29.666664919 +0000 UTC m=+1271.201778808" watchObservedRunningTime="2026-01-21 15:45:29.668355305 +0000 UTC m=+1271.203469194"
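[editor's note] The two pod_startup_latency_tracker entries above appear to satisfy a fixed relationship: podStartSLOduration = podStartE2EDuration - (lastFinishedPulling - firstStartedPulling), i.e. the SLO figure is end-to-end startup minus image-pull time, with E2E measured from podCreationTimestamp to watchObservedRunningTime. For barbican-keystone-listener: 6.637826014s - 1.918097728s = 4.719728286s, matching the logged value. A small sketch re-deriving this from the logged timestamps (standard library only; inferred semantics, not kubelet code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Timestamps copied from the barbican-keystone-listener entry above.
	created := parse("2026-01-21 15:45:23 +0000 UTC")
	firstPull := parse("2026-01-21 15:45:24.824699929 +0000 UTC")
	lastPull := parse("2026-01-21 15:45:26.742797657 +0000 UTC")
	observed := parse("2026-01-21 15:45:29.637826014 +0000 UTC") // watchObservedRunningTime

	e2e := observed.Sub(created)         // podStartE2EDuration: 6.637826014s
	slo := e2e - lastPull.Sub(firstPull) // minus pull time 1.918097728s -> 4.719728286s
	fmt.Println("E2E:", e2e, "SLO:", slo)
}
```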
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.835581 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"]
Jan 21 15:45:29 crc kubenswrapper[5021]: W0121 15:45:29.845168 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5cd9226_205d_4888_bad3_0da3ccfd61c0.slice/crio-c3e68e561b14f05717a3335080b622498b2d2e5f3f4d3a23e2bbe31a09d7aead WatchSource:0}: Error finding container c3e68e561b14f05717a3335080b622498b2d2e5f3f4d3a23e2bbe31a09d7aead: Status 404 returned error can't find the container with id c3e68e561b14f05717a3335080b622498b2d2e5f3f4d3a23e2bbe31a09d7aead
Jan 21 15:45:29 crc kubenswrapper[5021]: W0121 15:45:29.916417 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d915556_0d78_4e5b_99fa_0c11bad2d1c8.slice/crio-3d59b0bdecab517aceca0b845280156f65593dd4c2a1e2d7962cdd9901c89e15 WatchSource:0}: Error finding container 3d59b0bdecab517aceca0b845280156f65593dd4c2a1e2d7962cdd9901c89e15: Status 404 returned error can't find the container with id 3d59b0bdecab517aceca0b845280156f65593dd4c2a1e2d7962cdd9901c89e15
Jan 21 15:45:29 crc kubenswrapper[5021]: I0121 15:45:29.921792 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.172554 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.257670 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9"
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.322538 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rc7tk\" (UniqueName: \"kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.322812 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.323033 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.323147 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.323229 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.323329 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0\") pod \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\" (UID: \"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64\") "
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.332107 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk" (OuterVolumeSpecName: "kube-api-access-rc7tk") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "kube-api-access-rc7tk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.390645 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config" (OuterVolumeSpecName: "config") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.404783 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.425763 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.425827 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rc7tk\" (UniqueName: \"kubernetes.io/projected/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-kube-api-access-rc7tk\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.425843 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.435934 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.445607 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.454874 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" (UID: "a4e81a6f-5e29-4ba7-8aed-f6726afe6e64"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.528164 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.528198 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.528209 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.649276 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9" event={"ID":"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64","Type":"ContainerDied","Data":"3a0986ec8ff5dd9fe73b8d70914d1af47ac3e25f73a6b979fcdb238116524057"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.649383 5021 scope.go:117] "RemoveContainer" containerID="4a34d9f6ec66f2e4d1c220d85ef44f38360950463cef6a69c179f9757ff5c4f8"
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.649722 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d66f584d7-m2bc9"
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.664716 5021 generic.go:334] "Generic (PLEG): container finished" podID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerID="1bf2717fa1e27107e56b6e549b9fef6cd651fdaf8870560664a4c00506fcb995" exitCode=0
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.664765 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" event={"ID":"c5cd9226-205d-4888-bad3-0da3ccfd61c0","Type":"ContainerDied","Data":"1bf2717fa1e27107e56b6e549b9fef6cd651fdaf8870560664a4c00506fcb995"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.664787 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" event={"ID":"c5cd9226-205d-4888-bad3-0da3ccfd61c0","Type":"ContainerStarted","Data":"c3e68e561b14f05717a3335080b622498b2d2e5f3f4d3a23e2bbe31a09d7aead"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.679109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerStarted","Data":"b0be39c8ae52be02d5990fbd6de2c149adc18e9a2711ad760bb76af11a809a0e"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.685468 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerStarted","Data":"3d59b0bdecab517aceca0b845280156f65593dd4c2a1e2d7962cdd9901c89e15"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.714129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerStarted","Data":"b51cd969dd0947621e4f3ee57176f1c54afc01da026b6bc4483c24364abecbcc"}
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.766791 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"]
Jan 21 15:45:30 crc kubenswrapper[5021]: I0121 15:45:30.783836 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d66f584d7-m2bc9"]
Jan 21 15:45:31 crc kubenswrapper[5021]: I0121 15:45:31.708695 5021 scope.go:117] "RemoveContainer" containerID="59cd94a631ab3d21db1b67111f84c0246f6443bd659d226a857d5740724e2557"
Jan 21 15:45:32 crc kubenswrapper[5021]: I0121 15:45:32.053580 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 21 15:45:32 crc kubenswrapper[5021]: I0121 15:45:32.763425 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" path="/var/lib/kubelet/pods/a4e81a6f-5e29-4ba7-8aed-f6726afe6e64/volumes"
Jan 21 15:45:32 crc kubenswrapper[5021]: I0121 15:45:32.793301 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerStarted","Data":"51779ca7947247cdc7010395e5254a86cb2c5a42e677b08fe62568f9c4b8d552"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.816038 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerStarted","Data":"76e89ea49e8c1303c7bd38a2eb5b57d617464597851c7acc7f011a8232d4f998"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.822589 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerStarted","Data":"17d50952b46b5e0295f25d872de2943c96334598fe41d5c3d55c212dfb78e0ce"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.822885 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api-log" containerID="cri-o://51779ca7947247cdc7010395e5254a86cb2c5a42e677b08fe62568f9c4b8d552" gracePeriod=30
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.823279 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.823612 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api" containerID="cri-o://17d50952b46b5e0295f25d872de2943c96334598fe41d5c3d55c212dfb78e0ce" gracePeriod=30
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.827847 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" event={"ID":"c5cd9226-205d-4888-bad3-0da3ccfd61c0","Type":"ContainerStarted","Data":"898a30881cb859aa7268ec5cb80a6cda72a8fc0d85fabd06b75fc2168b6a2784"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.828829 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.848928 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerStarted","Data":"04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.858385 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.858363539 podStartE2EDuration="4.858363539s" podCreationTimestamp="2026-01-21 15:45:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:33.857837326 +0000 UTC m=+1275.392951225" watchObservedRunningTime="2026-01-21 15:45:33.858363539 +0000 UTC m=+1275.393477438"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.867230 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerStarted","Data":"3e2ad39675705ef9c70fa28adb97c9a01666ba00b4202ad69b0fcb8f9b4aba7d"}
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.867571 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b8886d4fd-qn9sz"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.895291 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" podStartSLOduration=5.895270062 podStartE2EDuration="5.895270062s" podCreationTimestamp="2026-01-21 15:45:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:33.886780484 +0000 UTC m=+1275.421894383" watchObservedRunningTime="2026-01-21 15:45:33.895270062 +0000 UTC m=+1275.430383951"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.906057 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b8886d4fd-qn9sz"
Jan 21 15:45:33 crc kubenswrapper[5021]: I0121 15:45:33.925629 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podStartSLOduration=5.925607007 podStartE2EDuration="5.925607007s" podCreationTimestamp="2026-01-21 15:45:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:33.910185452 +0000 UTC m=+1275.445299351" watchObservedRunningTime="2026-01-21 15:45:33.925607007 +0000 UTC m=+1275.460720896"
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.910663 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerStarted","Data":"596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0"}
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.911334 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.930529 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerStarted","Data":"6309ec3669ff7e2958129f07737666d2240f9f189c3b273e0dfa5bb0a8112966"}
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.948396 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.183552816 podStartE2EDuration="11.948377509s" podCreationTimestamp="2026-01-21 15:45:23 +0000 UTC" firstStartedPulling="2026-01-21 15:45:24.703307997 +0000 UTC m=+1266.238421886" lastFinishedPulling="2026-01-21 15:45:34.46813269 +0000 UTC m=+1276.003246579" observedRunningTime="2026-01-21 15:45:34.943252151 +0000 UTC m=+1276.478366040" watchObservedRunningTime="2026-01-21 15:45:34.948377509 +0000 UTC m=+1276.483491388"
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.949746 5021 generic.go:334] "Generic (PLEG): container finished" podID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerID="17d50952b46b5e0295f25d872de2943c96334598fe41d5c3d55c212dfb78e0ce" exitCode=0
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.949799 5021 generic.go:334] "Generic (PLEG): container finished" podID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerID="51779ca7947247cdc7010395e5254a86cb2c5a42e677b08fe62568f9c4b8d552" exitCode=143
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.949840 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerDied","Data":"17d50952b46b5e0295f25d872de2943c96334598fe41d5c3d55c212dfb78e0ce"}
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.949899 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerDied","Data":"51779ca7947247cdc7010395e5254a86cb2c5a42e677b08fe62568f9c4b8d552"}
Jan 21 15:45:34 crc kubenswrapper[5021]: I0121 15:45:34.975445 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.574385628 podStartE2EDuration="6.975425696s" podCreationTimestamp="2026-01-21 15:45:28 +0000 UTC" firstStartedPulling="2026-01-21 15:45:29.918827117 +0000 UTC m=+1271.453941016" lastFinishedPulling="2026-01-21 15:45:32.319867195 +0000 UTC m=+1273.854981084" observedRunningTime="2026-01-21 15:45:34.971187102 +0000 UTC m=+1276.506301001" watchObservedRunningTime="2026-01-21 15:45:34.975425696 +0000 UTC m=+1276.510539585"
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.484243 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549523 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549597 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549639 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549670 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549736 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549772 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.549923 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2skkg\" (UniqueName: \"kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg\") pod \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\" (UID: \"2feb17bb-dc07-4307-b994-3402ac4a1dbb\") "
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.551880 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs" (OuterVolumeSpecName: "logs") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.554019 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.571325 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg" (OuterVolumeSpecName: "kube-api-access-2skkg") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "kube-api-access-2skkg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.576068 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts" (OuterVolumeSpecName: "scripts") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.601074 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.638040 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
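[editor's note] Every entry in this log follows klog's header layout (severity letter, MMDD date, wall-clock time, PID, source file:line, message), wrapped in a journald-style "Jan 21 ... crc kubenswrapper[5021]:" prefix. For pulling fields out of lines like the unmount/teardown run above, a regexp sketch such as the following works; the pattern reflects the format as observed here and is standard library only:

```go
package main

import (
	"fmt"
	"regexp"
)

// klogLine matches headers like:
//   I0121 15:45:35.549923 5021 reconciler_common.go:159] "operationExecutor..."
// Groups: severity (I/W/E/F), MMDD, time, PID, source file:line, message.
var klogLine = regexp.MustCompile(
	`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+)\s+([\w.]+:\d+)\] (.*)`)

func main() {
	sample := `I0121 15:45:35.551880 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "logs"`
	if m := klogLine.FindStringSubmatch(sample); m != nil {
		fmt.Printf("severity=%s date=%s time=%s pid=%s src=%s\nmsg=%s\n",
			m[1], m[2], m[3], m[4], m[5], m[6])
	}
}
```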
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654062 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654303 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654368 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2skkg\" (UniqueName: \"kubernetes.io/projected/2feb17bb-dc07-4307-b994-3402ac4a1dbb-kube-api-access-2skkg\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654452 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2feb17bb-dc07-4307-b994-3402ac4a1dbb-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654514 5021 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2feb17bb-dc07-4307-b994-3402ac4a1dbb-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.654576 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.657085 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data" (OuterVolumeSpecName: "config-data") pod "2feb17bb-dc07-4307-b994-3402ac4a1dbb" (UID: "2feb17bb-dc07-4307-b994-3402ac4a1dbb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.756686 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2feb17bb-dc07-4307-b994-3402ac4a1dbb-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.964781 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2feb17bb-dc07-4307-b994-3402ac4a1dbb","Type":"ContainerDied","Data":"b51cd969dd0947621e4f3ee57176f1c54afc01da026b6bc4483c24364abecbcc"} Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.964865 5021 scope.go:117] "RemoveContainer" containerID="17d50952b46b5e0295f25d872de2943c96334598fe41d5c3d55c212dfb78e0ce" Jan 21 15:45:35 crc kubenswrapper[5021]: I0121 15:45:35.965581 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.003223 5021 scope.go:117] "RemoveContainer" containerID="51779ca7947247cdc7010395e5254a86cb2c5a42e677b08fe62568f9c4b8d552" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.016336 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.027716 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.041239 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:45:36 crc kubenswrapper[5021]: E0121 15:45:36.041626 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api-log" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.041643 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api-log" Jan 21 15:45:36 crc kubenswrapper[5021]: E0121 15:45:36.041656 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="init" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.041663 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="init" Jan 21 15:45:36 crc kubenswrapper[5021]: E0121 15:45:36.041680 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="dnsmasq-dns" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.041688 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="dnsmasq-dns" Jan 21 15:45:36 crc kubenswrapper[5021]: E0121 15:45:36.041703 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.041709 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.045411 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e81a6f-5e29-4ba7-8aed-f6726afe6e64" containerName="dnsmasq-dns" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.045481 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.045507 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" containerName="cinder-api-log" Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.046613 5021 util.go:30] "No sandbox for pod can be found. 
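[editor's note] The interleaved cpu_manager/state_mem/memory_manager lines above show resource-manager bookkeeping being purged for containers whose pods are gone, keyed by pod UID and container name, before the re-created cinder-api-0 (new UID) is admitted. A hedged sketch of that kind of sweep over a per-container state map; the types and helper are hypothetical, not the kubelet's:

```go
package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops per-container assignments whose pod UID is no
// longer active, mirroring the "RemoveStaleState: removing container" /
// "Deleted CPUSet assignment" pairs in the log above.
func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container pod=%s name=%s\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"2feb17bb-dc07-4307-b994-3402ac4a1dbb", "cinder-api"}:     "cpuset 0-1",
		{"2feb17bb-dc07-4307-b994-3402ac4a1dbb", "cinder-api-log"}: "cpuset 0-1",
		{"a4e81a6f-5e29-4ba7-8aed-f6726afe6e64", "dnsmasq-dns"}:    "cpuset 0-1",
	}
	// Only the re-created cinder-api-0 UID is still active.
	active := map[string]bool{"7ccf7211-3a03-41f1-839a-7bda93e55d4b": true}
	removeStaleState(assignments, active)
}
```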
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.046613 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.053104 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.053360 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.054270 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.058563 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.166979 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167029 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167068 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167245 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167296 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167385 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfgxq\" (UniqueName: \"kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167419 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167449 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.167471 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269549 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269627 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269674 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfgxq\" (UniqueName: \"kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269694 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269778 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269861 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.269958 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.270251 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.270947 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.270985 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.271019 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.277657 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.277707 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.277681 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.277917 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.278004 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.282679 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.294516 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfgxq\" (UniqueName: \"kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq\") pod \"cinder-api-0\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.389663 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.757595 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2feb17bb-dc07-4307-b994-3402ac4a1dbb" path="/var/lib/kubelet/pods/2feb17bb-dc07-4307-b994-3402ac4a1dbb/volumes"
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.910740 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 21 15:45:36 crc kubenswrapper[5021]: I0121 15:45:36.985679 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerStarted","Data":"9b8c21990b5804486fbc2e525d78a868a72c17f1257e2d98eda9e31e3995200e"}
Jan 21 15:45:37 crc kubenswrapper[5021]: I0121 15:45:37.068368 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-bb66bbffd-268lb"
Jan 21 15:45:37 crc kubenswrapper[5021]: I0121 15:45:37.221781 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-bb66bbffd-268lb"
Jan 21 15:45:37 crc kubenswrapper[5021]: I0121 15:45:37.709615 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b8886d4fd-qn9sz"
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.248710 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.345202 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb"
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.617188 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"]
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.633628 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="dnsmasq-dns" containerID="cri-o://032a2db9ac8fdcf3e941b9b7c502bf1b6ae493a0a8760720e35e9f8f5adf6882" gracePeriod=10
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.670533 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.705165 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b8886d4fd-qn9sz"
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.807341 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"]
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.807563 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-bb66bbffd-268lb" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api-log" containerID="cri-o://4db685e2ebf7830711dda7c3e4c0a750beef34f9db60d96ca446d206370a200e" gracePeriod=30
Jan 21 15:45:39 crc kubenswrapper[5021]: I0121 15:45:39.807692 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-bb66bbffd-268lb" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api" containerID="cri-o://835ae94f46e8d3a10de61c425c18bc14595bed85ddf2222528e93073ee8ed02d" gracePeriod=30
Jan 21 15:45:40 crc kubenswrapper[5021]: I0121 15:45:40.083013 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
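[editor's note] The "Killing container with a grace period" lines above (gracePeriod=10 for dnsmasq-dns, 30 for the barbican containers) correspond to the standard termination pattern: SIGTERM first, SIGKILL only if the process outlives the grace period. A standalone sketch of that pattern with os/exec; this is illustrative, not CRI-O's implementation:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, then escalates to SIGKILL if the process
// is still running once the grace period elapses.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	kill := time.AfterFunc(grace, func() { _ = cmd.Process.Kill() })
	defer kill.Stop()
	return cmd.Wait() // clean handlers exit 0; signal death reports 143-style status
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	err := killWithGrace(cmd, 10*time.Second) // cf. gracePeriod=10 above
	fmt.Println("wait result:", err)
}
```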
pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:40 crc kubenswrapper[5021]: I0121 15:45:40.228155 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-ld56k" podUID="97b76490-a49c-4ddc-b6ba-7fbda7094851" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:45:40 crc kubenswrapper[5021]: I0121 15:45:40.719146 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.148:5353: connect: connection refused" Jan 21 15:45:41 crc kubenswrapper[5021]: I0121 15:45:41.038707 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="cinder-scheduler" containerID="cri-o://76e89ea49e8c1303c7bd38a2eb5b57d617464597851c7acc7f011a8232d4f998" gracePeriod=30 Jan 21 15:45:41 crc kubenswrapper[5021]: I0121 15:45:41.039275 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="probe" containerID="cri-o://6309ec3669ff7e2958129f07737666d2240f9f189c3b273e0dfa5bb0a8112966" gracePeriod=30 Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.052521 5021 generic.go:334] "Generic (PLEG): container finished" podID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerID="4db685e2ebf7830711dda7c3e4c0a750beef34f9db60d96ca446d206370a200e" exitCode=143 Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.052631 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerDied","Data":"4db685e2ebf7830711dda7c3e4c0a750beef34f9db60d96ca446d206370a200e"} Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.059446 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerStarted","Data":"acb12fa4b5f061852748af753502ce94371a4a867002ad11d238b65b996be3e7"} Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.083094 5021 generic.go:334] "Generic (PLEG): container finished" podID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerID="032a2db9ac8fdcf3e941b9b7c502bf1b6ae493a0a8760720e35e9f8f5adf6882" exitCode=0 Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.083153 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" event={"ID":"bfa74c15-9387-4e75-8597-8ec28d5fea39","Type":"ContainerDied","Data":"032a2db9ac8fdcf3e941b9b7c502bf1b6ae493a0a8760720e35e9f8f5adf6882"} Jan 21 15:45:42 crc kubenswrapper[5021]: I0121 15:45:42.982759 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.020854 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.020982 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.021650 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vmn7\" (UniqueName: \"kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.021743 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.021769 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.021895 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb\") pod \"bfa74c15-9387-4e75-8597-8ec28d5fea39\" (UID: \"bfa74c15-9387-4e75-8597-8ec28d5fea39\") " Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.030780 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7" (OuterVolumeSpecName: "kube-api-access-5vmn7") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "kube-api-access-5vmn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.108635 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.112633 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" event={"ID":"bfa74c15-9387-4e75-8597-8ec28d5fea39","Type":"ContainerDied","Data":"eb2ee5a9b475340c216165e1e754c2582dfa35e9d93cb1d5f3c4628b832dc193"} Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.112711 5021 scope.go:117] "RemoveContainer" containerID="032a2db9ac8fdcf3e941b9b7c502bf1b6ae493a0a8760720e35e9f8f5adf6882" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.112862 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-tjlrx" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.118840 5021 generic.go:334] "Generic (PLEG): container finished" podID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerID="6309ec3669ff7e2958129f07737666d2240f9f189c3b273e0dfa5bb0a8112966" exitCode=0 Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.118876 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerDied","Data":"6309ec3669ff7e2958129f07737666d2240f9f189c3b273e0dfa5bb0a8112966"} Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.124072 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.124218 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.124236 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.124249 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vmn7\" (UniqueName: \"kubernetes.io/projected/bfa74c15-9387-4e75-8597-8ec28d5fea39-kube-api-access-5vmn7\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.124644 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.145449 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config" (OuterVolumeSpecName: "config") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.182561 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bfa74c15-9387-4e75-8597-8ec28d5fea39" (UID: "bfa74c15-9387-4e75-8597-8ec28d5fea39"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.212213 5021 scope.go:117] "RemoveContainer" containerID="83989d4993d161ae52f992021ec35d82a1a4a79be3a4760f4eb044f8cf112c45" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.227264 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.227302 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.227344 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfa74c15-9387-4e75-8597-8ec28d5fea39-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.452356 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"] Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.480300 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-tjlrx"] Jan 21 15:45:43 crc kubenswrapper[5021]: I0121 15:45:43.753807 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:45:44 crc kubenswrapper[5021]: I0121 15:45:44.603241 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-bb66bbffd-268lb" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": dial tcp 10.217.0.161:9311: connect: connection refused" Jan 21 15:45:44 crc kubenswrapper[5021]: I0121 15:45:44.603337 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-bb66bbffd-268lb" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": dial tcp 10.217.0.161:9311: connect: connection refused" Jan 21 15:45:44 crc kubenswrapper[5021]: I0121 15:45:44.750076 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" path="/var/lib/kubelet/pods/bfa74c15-9387-4e75-8597-8ec28d5fea39/volumes" Jan 21 15:45:44 crc kubenswrapper[5021]: I0121 15:45:44.852552 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.147213 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerStarted","Data":"34073a8e93f07e196867b52269b44be932eaae6b829c7faf37daff1fefaef5dd"} Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.147572 5021 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/cinder-api-0" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.150491 5021 generic.go:334] "Generic (PLEG): container finished" podID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerID="76e89ea49e8c1303c7bd38a2eb5b57d617464597851c7acc7f011a8232d4f998" exitCode=0 Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.150541 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerDied","Data":"76e89ea49e8c1303c7bd38a2eb5b57d617464597851c7acc7f011a8232d4f998"} Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.152711 5021 generic.go:334] "Generic (PLEG): container finished" podID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerID="835ae94f46e8d3a10de61c425c18bc14595bed85ddf2222528e93073ee8ed02d" exitCode=0 Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.152736 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerDied","Data":"835ae94f46e8d3a10de61c425c18bc14595bed85ddf2222528e93073ee8ed02d"} Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.152750 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bb66bbffd-268lb" event={"ID":"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10","Type":"ContainerDied","Data":"7619a0df4c4385ef9b6a2f7b158d72fbd2c98ff0173501c9eba1a9265316a8c9"} Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.152760 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7619a0df4c4385ef9b6a2f7b158d72fbd2c98ff0173501c9eba1a9265316a8c9" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.174288 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=9.174266836 podStartE2EDuration="9.174266836s" podCreationTimestamp="2026-01-21 15:45:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:45.167232587 +0000 UTC m=+1286.702346476" watchObservedRunningTime="2026-01-21 15:45:45.174266836 +0000 UTC m=+1286.709380725" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.191829 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.263290 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs\") pod \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.263495 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle\") pod \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.263630 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom\") pod \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.263679 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data\") pod \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.263720 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgsbg\" (UniqueName: \"kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg\") pod \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\" (UID: \"a0ee318d-b8a6-4810-9d12-4f27ae1ffe10\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.264691 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs" (OuterVolumeSpecName: "logs") pod "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" (UID: "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.290392 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg" (OuterVolumeSpecName: "kube-api-access-qgsbg") pod "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" (UID: "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10"). InnerVolumeSpecName "kube-api-access-qgsbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.290640 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" (UID: "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.294073 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" (UID: "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.327110 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data" (OuterVolumeSpecName: "config-data") pod "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" (UID: "a0ee318d-b8a6-4810-9d12-4f27ae1ffe10"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.366382 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgsbg\" (UniqueName: \"kubernetes.io/projected/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-kube-api-access-qgsbg\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.366417 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.366428 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.366439 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.366448 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.436165 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467215 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467274 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467346 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467380 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467443 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.467534 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z97vx\" (UniqueName: \"kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx\") pod \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\" (UID: \"4d915556-0d78-4e5b-99fa-0c11bad2d1c8\") " Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.468720 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.473217 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx" (OuterVolumeSpecName: "kube-api-access-z97vx") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "kube-api-access-z97vx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.479357 5021 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.479392 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z97vx\" (UniqueName: \"kubernetes.io/projected/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-kube-api-access-z97vx\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.483143 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts" (OuterVolumeSpecName: "scripts") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.483360 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.527803 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.570250 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data" (OuterVolumeSpecName: "config-data") pod "4d915556-0d78-4e5b-99fa-0c11bad2d1c8" (UID: "4d915556-0d78-4e5b-99fa-0c11bad2d1c8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.581458 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.581480 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.581489 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.581501 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d915556-0d78-4e5b-99fa-0c11bad2d1c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:45 crc kubenswrapper[5021]: I0121 15:45:45.892065 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.168128 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4d915556-0d78-4e5b-99fa-0c11bad2d1c8","Type":"ContainerDied","Data":"3d59b0bdecab517aceca0b845280156f65593dd4c2a1e2d7962cdd9901c89e15"} Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.168153 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.168164 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-bb66bbffd-268lb" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.168230 5021 scope.go:117] "RemoveContainer" containerID="6309ec3669ff7e2958129f07737666d2240f9f189c3b273e0dfa5bb0a8112966" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.192788 5021 scope.go:117] "RemoveContainer" containerID="76e89ea49e8c1303c7bd38a2eb5b57d617464597851c7acc7f011a8232d4f998" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.224186 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.239128 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.264617 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"] Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.291553 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-bb66bbffd-268lb"] Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.302537 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.303861 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api-log" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312071 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api-log" Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.312120 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312129 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api" Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.312159 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="probe" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312165 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="probe" Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.312186 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="init" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312193 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="init" Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.312246 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="cinder-scheduler" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312274 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="cinder-scheduler" Jan 21 15:45:46 crc kubenswrapper[5021]: E0121 15:45:46.312300 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="dnsmasq-dns" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.312307 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="dnsmasq-dns" Jan 21 
15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.313037 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.313061 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfa74c15-9387-4e75-8597-8ec28d5fea39" containerName="dnsmasq-dns" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.313082 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="probe" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.313097 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" containerName="cinder-scheduler" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.313110 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" containerName="barbican-api-log" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.314867 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.314989 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.318224 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.432527 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.432807 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.432854 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.432916 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8pcv\" (UniqueName: \"kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.433249 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.433385 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.534787 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.534831 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.534862 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8pcv\" (UniqueName: \"kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.534949 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.534985 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.535003 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.535107 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.539600 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.540487 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.540494 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.545061 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.555002 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8pcv\" (UniqueName: \"kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv\") pod \"cinder-scheduler-0\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.639722 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.761130 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d915556-0d78-4e5b-99fa-0c11bad2d1c8" path="/var/lib/kubelet/pods/4d915556-0d78-4e5b-99fa-0c11bad2d1c8/volumes" Jan 21 15:45:46 crc kubenswrapper[5021]: I0121 15:45:46.762451 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0ee318d-b8a6-4810-9d12-4f27ae1ffe10" path="/var/lib/kubelet/pods/a0ee318d-b8a6-4810-9d12-4f27ae1ffe10/volumes" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.123831 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.178966 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerStarted","Data":"ea879a2ff02e39787f2c34829176f75371e1c7090f282775f6dc547cc945a7b7"} Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.801591 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.803319 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.806208 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.813376 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.814331 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.816679 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-kk45z" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.962285 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.962433 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.962515 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:47 crc kubenswrapper[5021]: I0121 15:45:47.962563 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5q2w\" (UniqueName: \"kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.064175 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.064245 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.064313 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.064346 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-t5q2w\" (UniqueName: \"kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.068632 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.068884 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.074547 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.089011 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5q2w\" (UniqueName: \"kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w\") pod \"openstackclient\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.189218 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.198349 5021 generic.go:334] "Generic (PLEG): container finished" podID="cb49e3b7-78e5-4094-9bf0-d25f350d70a2" containerID="4da65adcb8ccfeb9a613436dbfa586305e06ea8f16f79bf23435b620e6b5c598" exitCode=0 Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.198414 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gm6fx" event={"ID":"cb49e3b7-78e5-4094-9bf0-d25f350d70a2","Type":"ContainerDied","Data":"4da65adcb8ccfeb9a613436dbfa586305e06ea8f16f79bf23435b620e6b5c598"} Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.204130 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerStarted","Data":"437d3b10fb0fb297b844f9dbf1d4a83367b420ef57cd073914c0525d5c579f5d"} Jan 21 15:45:48 crc kubenswrapper[5021]: W0121 15:45:48.748444 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddeb21bc7_83dd_4cbd_9ea2_a2378b4e12ca.slice/crio-7900cca3328ff6013f77818bc128877c868b228911631070e94bf38d3e5c6ae7 WatchSource:0}: Error finding container 7900cca3328ff6013f77818bc128877c868b228911631070e94bf38d3e5c6ae7: Status 404 returned error can't find the container with id 7900cca3328ff6013f77818bc128877c868b228911631070e94bf38d3e5c6ae7 Jan 21 15:45:48 crc kubenswrapper[5021]: I0121 15:45:48.776022 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.214276 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerStarted","Data":"0771568118c8d7a6aa5ededbd663532cb393bc36afe29638946df189c2108dfc"} Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.215855 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca","Type":"ContainerStarted","Data":"7900cca3328ff6013f77818bc128877c868b228911631070e94bf38d3e5c6ae7"} Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.247952 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.247934735 podStartE2EDuration="3.247934735s" podCreationTimestamp="2026-01-21 15:45:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:49.240920116 +0000 UTC m=+1290.776034005" watchObservedRunningTime="2026-01-21 15:45:49.247934735 +0000 UTC m=+1290.783048624" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.624875 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.713623 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") pod \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.713838 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle\") pod \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.713962 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnplq\" (UniqueName: \"kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq\") pod \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\" (UID: \"cb49e3b7-78e5-4094-9bf0-d25f350d70a2\") " Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.720569 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq" (OuterVolumeSpecName: "kube-api-access-xnplq") pod "cb49e3b7-78e5-4094-9bf0-d25f350d70a2" (UID: "cb49e3b7-78e5-4094-9bf0-d25f350d70a2"). InnerVolumeSpecName "kube-api-access-xnplq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.751134 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config" (OuterVolumeSpecName: "config") pod "cb49e3b7-78e5-4094-9bf0-d25f350d70a2" (UID: "cb49e3b7-78e5-4094-9bf0-d25f350d70a2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.754254 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb49e3b7-78e5-4094-9bf0-d25f350d70a2" (UID: "cb49e3b7-78e5-4094-9bf0-d25f350d70a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.820265 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.820304 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:49 crc kubenswrapper[5021]: I0121 15:45:49.820315 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnplq\" (UniqueName: \"kubernetes.io/projected/cb49e3b7-78e5-4094-9bf0-d25f350d70a2-kube-api-access-xnplq\") on node \"crc\" DevicePath \"\"" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.230596 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-gm6fx" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.230992 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-gm6fx" event={"ID":"cb49e3b7-78e5-4094-9bf0-d25f350d70a2","Type":"ContainerDied","Data":"3b94e4226aa112d7f8b06e17e98c660f45a98d12937ad51b10b9f633028499ec"} Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.231042 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b94e4226aa112d7f8b06e17e98c660f45a98d12937ad51b10b9f633028499ec" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.418987 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:45:50 crc kubenswrapper[5021]: E0121 15:45:50.419613 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb49e3b7-78e5-4094-9bf0-d25f350d70a2" containerName="neutron-db-sync" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.419632 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb49e3b7-78e5-4094-9bf0-d25f350d70a2" containerName="neutron-db-sync" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.419823 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb49e3b7-78e5-4094-9bf0-d25f350d70a2" containerName="neutron-db-sync" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.421740 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.453025 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.501783 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.503756 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.506601 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.506837 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.508162 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.513856 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gc7s6" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536024 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536067 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536125 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536210 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxj54\" (UniqueName: \"kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536236 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536290 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.536428 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638326 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638703 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638779 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638823 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxj54\" (UniqueName: \"kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638854 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638889 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlwcl\" (UniqueName: \"kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638943 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.638976 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.639025 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.639046 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.639064 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.639371 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.639968 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.640236 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.640271 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.640359 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.660047 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxj54\" (UniqueName: \"kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54\") pod \"dnsmasq-dns-6bb4fc677f-d5z29\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.741721 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.741850 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlwcl\" (UniqueName: \"kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl\") pod 
\"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.742002 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.742050 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.742272 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.748607 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.749810 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.749884 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.749956 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.750711 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.772839 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlwcl\" (UniqueName: \"kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl\") pod \"neutron-5cc8cdbd96-42qdw\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:50 crc kubenswrapper[5021]: I0121 15:45:50.836272 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:51 crc kubenswrapper[5021]: I0121 15:45:51.312838 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:45:51 crc kubenswrapper[5021]: I0121 15:45:51.611346 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:45:51 crc kubenswrapper[5021]: W0121 15:45:51.614173 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48140321_e331_4340_868b_d050bbfcbd92.slice/crio-25155ad38b901827ea1851d388cf62d76c81ec29d7a1a1e7ddd2370c508ec43e WatchSource:0}: Error finding container 25155ad38b901827ea1851d388cf62d76c81ec29d7a1a1e7ddd2370c508ec43e: Status 404 returned error can't find the container with id 25155ad38b901827ea1851d388cf62d76c81ec29d7a1a1e7ddd2370c508ec43e Jan 21 15:45:51 crc kubenswrapper[5021]: I0121 15:45:51.640474 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 21 15:45:52 crc kubenswrapper[5021]: I0121 15:45:52.297302 5021 generic.go:334] "Generic (PLEG): container finished" podID="a74c962f-f04a-4b18-b50b-4546528776b5" containerID="89da4f4af68b745600e9479b39cba9531d6d6bb0cb36940b027129118b9991ca" exitCode=0 Jan 21 15:45:52 crc kubenswrapper[5021]: I0121 15:45:52.297385 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" event={"ID":"a74c962f-f04a-4b18-b50b-4546528776b5","Type":"ContainerDied","Data":"89da4f4af68b745600e9479b39cba9531d6d6bb0cb36940b027129118b9991ca"} Jan 21 15:45:52 crc kubenswrapper[5021]: I0121 15:45:52.297717 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" event={"ID":"a74c962f-f04a-4b18-b50b-4546528776b5","Type":"ContainerStarted","Data":"b008daae3f85d1848f6a64978c41509faf0ce07f07cd5d79be8db63acf1f837f"} Jan 21 15:45:52 crc kubenswrapper[5021]: I0121 15:45:52.301801 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerStarted","Data":"7db78f313be2bf899060a39f4f6e01c74bfd6f22e83686e7cc735c19bd8e3c1b"} Jan 21 15:45:52 crc kubenswrapper[5021]: I0121 15:45:52.301844 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerStarted","Data":"25155ad38b901827ea1851d388cf62d76c81ec29d7a1a1e7ddd2370c508ec43e"} Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.311680 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" event={"ID":"a74c962f-f04a-4b18-b50b-4546528776b5","Type":"ContainerStarted","Data":"812d4a087958b8c409ebffe2d9d27c71091c65e0167a82c9a44196c8fc4e94bf"} Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.312209 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.314330 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerStarted","Data":"dae06be82c19848eba9aae53d94ac929368f48abdf5774d76ed75afee2f1baa6"} Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.314938 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.337899 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" podStartSLOduration=3.33787784 podStartE2EDuration="3.33787784s" podCreationTimestamp="2026-01-21 15:45:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:53.332539666 +0000 UTC m=+1294.867653555" watchObservedRunningTime="2026-01-21 15:45:53.33787784 +0000 UTC m=+1294.872991729" Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.925291 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 21 15:45:53 crc kubenswrapper[5021]: I0121 15:45:53.947633 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5cc8cdbd96-42qdw" podStartSLOduration=3.9476160289999997 podStartE2EDuration="3.947616029s" podCreationTimestamp="2026-01-21 15:45:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:53.359288635 +0000 UTC m=+1294.894402524" watchObservedRunningTime="2026-01-21 15:45:53.947616029 +0000 UTC m=+1295.482729918" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.137008 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.138591 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.142756 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.143006 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.143186 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.175661 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.216963 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217032 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217053 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf8ff\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " 
pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217083 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217133 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217162 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217194 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.217209 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.238328 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319271 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319311 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319333 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf8ff\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319369 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319468 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319496 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319538 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.319554 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.320480 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.320782 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.327769 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.330405 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.335810 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle\") pod \"swift-proxy-7bbf467d99-62cpf\" 
(UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.336320 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.338754 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.353760 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf8ff\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff\") pod \"swift-proxy-7bbf467d99-62cpf\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.431983 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c795c5585-m9bzp"] Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.433627 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.437707 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.439919 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c795c5585-m9bzp"] Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.450491 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.474160 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538636 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538711 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc4wf\" (UniqueName: \"kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538735 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538762 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538777 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538871 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.538893 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.642888 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc4wf\" (UniqueName: \"kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.642969 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle\") pod \"neutron-c795c5585-m9bzp\" (UID: 
\"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.643005 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.643022 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.643118 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.643141 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.643158 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.654864 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.655702 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.655808 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.660639 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.661133 5021 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.661700 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.675597 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc4wf\" (UniqueName: \"kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf\") pod \"neutron-c795c5585-m9bzp\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") " pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:54 crc kubenswrapper[5021]: I0121 15:45:54.821461 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:55 crc kubenswrapper[5021]: I0121 15:45:55.289173 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:45:55 crc kubenswrapper[5021]: I0121 15:45:55.357364 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerStarted","Data":"f7c52590fe65cfef2e3a4e669943ba59aa01519cb28c7c39ea0ef813edfa066b"} Jan 21 15:45:55 crc kubenswrapper[5021]: I0121 15:45:55.538744 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c795c5585-m9bzp"] Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.368793 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerStarted","Data":"f2a67cef1ba2db3bd18f3dfff338b23523c5e1f0086e64b2d5a4f0a2940f8a1e"} Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.369175 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerStarted","Data":"40d25da6134bf8a7e089cfd3c065d27e8bfaa57441da4124734dff449dcf1ca3"} Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.369190 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerStarted","Data":"5a0c806db16dee09c2027bd3e03417e0fc89a8d6f346f0958538c0a1a8dc48c4"} Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.369596 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.371644 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerStarted","Data":"21d2a742eb18fb8403f215dcc111b5047c3d3f27852bd27e95a06b29a951b3cd"} Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.371689 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerStarted","Data":"16670df6a896fe5b4ccec437b88876c2007832ffa1ddab3c5a874249577f502d"} Jan 
21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.371729 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.371753 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.429949 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7bbf467d99-62cpf" podStartSLOduration=2.429928052 podStartE2EDuration="2.429928052s" podCreationTimestamp="2026-01-21 15:45:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:56.426210733 +0000 UTC m=+1297.961324622" watchObservedRunningTime="2026-01-21 15:45:56.429928052 +0000 UTC m=+1297.965041941" Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.431965 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-c795c5585-m9bzp" podStartSLOduration=2.4319482470000002 podStartE2EDuration="2.431948247s" podCreationTimestamp="2026-01-21 15:45:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:45:56.404385236 +0000 UTC m=+1297.939499125" watchObservedRunningTime="2026-01-21 15:45:56.431948247 +0000 UTC m=+1297.967062146" Jan 21 15:45:56 crc kubenswrapper[5021]: I0121 15:45:56.915033 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.018359 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.018928 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-central-agent" containerID="cri-o://a0037c2b90a36c6f015cf859d28c5ddce8552bc8d7ef42b6ef27223bf6026f8b" gracePeriod=30 Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.018981 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="proxy-httpd" containerID="cri-o://596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0" gracePeriod=30 Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.019046 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="sg-core" containerID="cri-o://04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11" gracePeriod=30 Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.019081 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-notification-agent" containerID="cri-o://8c5d90a6ad824fea3ab5ae2edf9cc8362f2364eddb0ab757e7d2232bc9e81433" gracePeriod=30 Jan 21 15:45:59 crc kubenswrapper[5021]: E0121 15:45:59.263192 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71fa8674_249d_42fa_b95b_bf52592b8998.slice/crio-conmon-596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71fa8674_249d_42fa_b95b_bf52592b8998.slice/crio-conmon-04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71fa8674_249d_42fa_b95b_bf52592b8998.slice/crio-596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71fa8674_249d_42fa_b95b_bf52592b8998.slice/crio-04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.413978 5021 generic.go:334] "Generic (PLEG): container finished" podID="71fa8674-249d-42fa-b95b-bf52592b8998" containerID="596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0" exitCode=0 Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.414018 5021 generic.go:334] "Generic (PLEG): container finished" podID="71fa8674-249d-42fa-b95b-bf52592b8998" containerID="04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11" exitCode=2 Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.414045 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerDied","Data":"596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0"} Jan 21 15:45:59 crc kubenswrapper[5021]: I0121 15:45:59.414099 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerDied","Data":"04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11"} Jan 21 15:46:00 crc kubenswrapper[5021]: I0121 15:46:00.428003 5021 generic.go:334] "Generic (PLEG): container finished" podID="71fa8674-249d-42fa-b95b-bf52592b8998" containerID="a0037c2b90a36c6f015cf859d28c5ddce8552bc8d7ef42b6ef27223bf6026f8b" exitCode=0 Jan 21 15:46:00 crc kubenswrapper[5021]: I0121 15:46:00.428160 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerDied","Data":"a0037c2b90a36c6f015cf859d28c5ddce8552bc8d7ef42b6ef27223bf6026f8b"} Jan 21 15:46:00 crc kubenswrapper[5021]: I0121 15:46:00.758435 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:46:00 crc kubenswrapper[5021]: I0121 15:46:00.835776 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"] Jan 21 15:46:00 crc kubenswrapper[5021]: I0121 15:46:00.836072 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="dnsmasq-dns" containerID="cri-o://898a30881cb859aa7268ec5cb80a6cda72a8fc0d85fabd06b75fc2168b6a2784" gracePeriod=10 Jan 21 15:46:02 crc kubenswrapper[5021]: I0121 15:46:02.451509 5021 generic.go:334] "Generic (PLEG): container finished" podID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" 
containerID="898a30881cb859aa7268ec5cb80a6cda72a8fc0d85fabd06b75fc2168b6a2784" exitCode=0 Jan 21 15:46:02 crc kubenswrapper[5021]: I0121 15:46:02.451944 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" event={"ID":"c5cd9226-205d-4888-bad3-0da3ccfd61c0","Type":"ContainerDied","Data":"898a30881cb859aa7268ec5cb80a6cda72a8fc0d85fabd06b75fc2168b6a2784"} Jan 21 15:46:02 crc kubenswrapper[5021]: I0121 15:46:02.457207 5021 generic.go:334] "Generic (PLEG): container finished" podID="71fa8674-249d-42fa-b95b-bf52592b8998" containerID="8c5d90a6ad824fea3ab5ae2edf9cc8362f2364eddb0ab757e7d2232bc9e81433" exitCode=0 Jan 21 15:46:02 crc kubenswrapper[5021]: I0121 15:46:02.457246 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerDied","Data":"8c5d90a6ad824fea3ab5ae2edf9cc8362f2364eddb0ab757e7d2232bc9e81433"} Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.391122 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.485098 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.485676 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca","Type":"ContainerStarted","Data":"f5e3f741e90cddcaa56c488a9ce56cfd5d36717ab8f8f3d4ee72791c52b6336c"} Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.492758 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"71fa8674-249d-42fa-b95b-bf52592b8998","Type":"ContainerDied","Data":"fd86e1cdc4e18ba2aab0779d8ce6261b2bc164bc56eae7c821f65fd9614f49ce"} Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.492804 5021 scope.go:117] "RemoveContainer" containerID="596c1cb00a39323ab80b8ae8eeccc0bff50779bd7028c347a7ec06f50ef5adf0" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.493032 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.501806 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.506513 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.18160716 podStartE2EDuration="17.506496954s" podCreationTimestamp="2026-01-21 15:45:47 +0000 UTC" firstStartedPulling="2026-01-21 15:45:48.750201735 +0000 UTC m=+1290.285315634" lastFinishedPulling="2026-01-21 15:46:04.075091539 +0000 UTC m=+1305.610205428" observedRunningTime="2026-01-21 15:46:04.503614685 +0000 UTC m=+1306.038728574" watchObservedRunningTime="2026-01-21 15:46:04.506496954 +0000 UTC m=+1306.041610843" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.525141 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.534262 5021 scope.go:117] "RemoveContainer" containerID="04b7414bccea40c7f838cafd44d5c9018728f3c618548462c3b7782186544f11" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.568873 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.568991 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569038 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569102 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569190 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569337 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh54c\" (UniqueName: \"kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569355 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml\") pod \"71fa8674-249d-42fa-b95b-bf52592b8998\" (UID: \"71fa8674-249d-42fa-b95b-bf52592b8998\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.569663 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.570262 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.570570 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.575473 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c" (OuterVolumeSpecName: "kube-api-access-jh54c") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "kube-api-access-jh54c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.579660 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts" (OuterVolumeSpecName: "scripts") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.596676 5021 scope.go:117] "RemoveContainer" containerID="8c5d90a6ad824fea3ab5ae2edf9cc8362f2364eddb0ab757e7d2232bc9e81433" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.622890 5021 scope.go:117] "RemoveContainer" containerID="a0037c2b90a36c6f015cf859d28c5ddce8552bc8d7ef42b6ef27223bf6026f8b" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.634939 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671049 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671108 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671539 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671610 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jscv7\" (UniqueName: \"kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671639 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.671856 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc\") pod \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\" (UID: \"c5cd9226-205d-4888-bad3-0da3ccfd61c0\") " Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.672549 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh54c\" (UniqueName: \"kubernetes.io/projected/71fa8674-249d-42fa-b95b-bf52592b8998-kube-api-access-jh54c\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.672576 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.672590 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.672603 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/71fa8674-249d-42fa-b95b-bf52592b8998-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.696084 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: 
"71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.716641 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7" (OuterVolumeSpecName: "kube-api-access-jscv7") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "kube-api-access-jscv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.768433 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config" (OuterVolumeSpecName: "config") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.780508 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data" (OuterVolumeSpecName: "config-data") pod "71fa8674-249d-42fa-b95b-bf52592b8998" (UID: "71fa8674-249d-42fa-b95b-bf52592b8998"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.781393 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.781432 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.781448 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71fa8674-249d-42fa-b95b-bf52592b8998-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.781463 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jscv7\" (UniqueName: \"kubernetes.io/projected/c5cd9226-205d-4888-bad3-0da3ccfd61c0-kube-api-access-jscv7\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.790407 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.808513 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.820748 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.831109 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c5cd9226-205d-4888-bad3-0da3ccfd61c0" (UID: "c5cd9226-205d-4888-bad3-0da3ccfd61c0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.843284 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.850977 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.883444 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.883478 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.883493 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.883505 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c5cd9226-205d-4888-bad3-0da3ccfd61c0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.943542 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944383 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="dnsmasq-dns" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944404 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="dnsmasq-dns" Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944424 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-notification-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944432 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-notification-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944454 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="proxy-httpd" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944460 5021 
state_mem.go:107] "Deleted CPUSet assignment" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="proxy-httpd" Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944477 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="sg-core" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944485 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="sg-core" Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944516 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-central-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944524 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-central-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: E0121 15:46:04.944546 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="init" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944552 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="init" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944874 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-notification-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944918 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="ceilometer-central-agent" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944929 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="dnsmasq-dns" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.944942 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="sg-core" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.946274 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" containerName="proxy-httpd" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.954649 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.958249 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 21 15:46:04 crc kubenswrapper[5021]: I0121 15:46:04.958576 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.022575 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086651 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086712 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086739 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086840 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086928 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8n6v\" (UniqueName: \"kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.086960 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.087003 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188399 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188479 
5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8n6v\" (UniqueName: \"kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188514 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188561 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188664 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188693 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.188716 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.189319 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.189380 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.192990 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.193452 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.194561 5021 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.208009 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8n6v\" (UniqueName: \"kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.208883 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") " pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.392296 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.533621 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.537127 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" event={"ID":"c5cd9226-205d-4888-bad3-0da3ccfd61c0","Type":"ContainerDied","Data":"c3e68e561b14f05717a3335080b622498b2d2e5f3f4d3a23e2bbe31a09d7aead"} Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.537204 5021 scope.go:117] "RemoveContainer" containerID="898a30881cb859aa7268ec5cb80a6cda72a8fc0d85fabd06b75fc2168b6a2784" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.583014 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"] Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.597254 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-674b76c99f-vlmjb"] Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.607405 5021 scope.go:117] "RemoveContainer" containerID="1bf2717fa1e27107e56b6e549b9fef6cd651fdaf8870560664a4c00506fcb995" Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.923296 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:05 crc kubenswrapper[5021]: W0121 15:46:05.936328 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6160307_8450_4492_96b7_b09c826919db.slice/crio-32c70c6ea9957022ef82fce0b8811c081959fd09b585ffdd8ac1146857cce47c WatchSource:0}: Error finding container 32c70c6ea9957022ef82fce0b8811c081959fd09b585ffdd8ac1146857cce47c: Status 404 returned error can't find the container with id 32c70c6ea9957022ef82fce0b8811c081959fd09b585ffdd8ac1146857cce47c Jan 21 15:46:05 crc kubenswrapper[5021]: I0121 15:46:05.938573 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 15:46:06 crc kubenswrapper[5021]: I0121 15:46:06.543507 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerStarted","Data":"32c70c6ea9957022ef82fce0b8811c081959fd09b585ffdd8ac1146857cce47c"} Jan 21 15:46:06 crc kubenswrapper[5021]: I0121 15:46:06.748319 5021 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="71fa8674-249d-42fa-b95b-bf52592b8998" path="/var/lib/kubelet/pods/71fa8674-249d-42fa-b95b-bf52592b8998/volumes" Jan 21 15:46:06 crc kubenswrapper[5021]: I0121 15:46:06.749197 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" path="/var/lib/kubelet/pods/c5cd9226-205d-4888-bad3-0da3ccfd61c0/volumes" Jan 21 15:46:07 crc kubenswrapper[5021]: I0121 15:46:07.559278 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerStarted","Data":"8ab34c578f1bfcdc54710774b53e185a546dbbbff8dc990f73a12bb3f2043672"} Jan 21 15:46:07 crc kubenswrapper[5021]: I0121 15:46:07.559542 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerStarted","Data":"55f5963d406f7c0bc674fef059a34f5ad473080ab784a93b9d67f2ae188bff0b"} Jan 21 15:46:07 crc kubenswrapper[5021]: I0121 15:46:07.963536 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:07 crc kubenswrapper[5021]: I0121 15:46:07.963932 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" containerName="kube-state-metrics" containerID="cri-o://63a56a368dac364949819e8a1463f6184ccb6e30760a5a57a555cda751e7913c" gracePeriod=30 Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.325713 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-pzb9h"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.328832 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.339416 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-pzb9h"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.440111 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-gd72h"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.441679 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.459466 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42mb4\" (UniqueName: \"kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.459683 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.500776 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-48eb-account-create-update-dcvwx"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.509205 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.515217 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.564689 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42mb4\" (UniqueName: \"kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.564811 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.564945 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.565179 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv6j7\" (UniqueName: \"kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.568195 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.571593 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-gd72h"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.623247 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-48eb-account-create-update-dcvwx"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.638677 5021 generic.go:334] "Generic (PLEG): container finished" podID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" containerID="63a56a368dac364949819e8a1463f6184ccb6e30760a5a57a555cda751e7913c" exitCode=2 Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.638773 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0bd89c3-8fac-437e-9be8-b0703dd1be4c","Type":"ContainerDied","Data":"63a56a368dac364949819e8a1463f6184ccb6e30760a5a57a555cda751e7913c"} Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.645008 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42mb4\" (UniqueName: \"kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4\") pod \"nova-api-db-create-pzb9h\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc 
kubenswrapper[5021]: I0121 15:46:08.666940 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.667035 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv6j7\" (UniqueName: \"kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.667109 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.667157 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfsvn\" (UniqueName: \"kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.670643 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.675071 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.687491 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerStarted","Data":"7c3e16e55e662254318a8fe6f2cfeb3a8b68be5d8afa30f23faccc2de9884c49"} Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.690097 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv6j7\" (UniqueName: \"kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7\") pod \"nova-cell0-db-create-gd72h\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.769546 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfsvn\" (UniqueName: \"kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.769606 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.770364 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-ztp85"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.770436 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.773581 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ztp85"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.773775 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.777479 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.779725 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1670-account-create-update-rpbpq"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.781111 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.786767 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.792809 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfsvn\" (UniqueName: \"kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn\") pod \"nova-api-48eb-account-create-update-dcvwx\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.793080 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-rpbpq"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.808091 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.822499 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.876616 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.877503 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.877708 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvkrd\" (UniqueName: \"kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.878074 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ll4n\" (UniqueName: \"kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.889199 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-tcc9q"] Jan 21 15:46:08 crc kubenswrapper[5021]: E0121 15:46:08.890070 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" containerName="kube-state-metrics" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.890107 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" containerName="kube-state-metrics" Jan 21 15:46:08 crc 
kubenswrapper[5021]: I0121 15:46:08.890483 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" containerName="kube-state-metrics" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.896356 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.900970 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.909092 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-tcc9q"] Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982524 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rp5n\" (UniqueName: \"kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n\") pod \"a0bd89c3-8fac-437e-9be8-b0703dd1be4c\" (UID: \"a0bd89c3-8fac-437e-9be8-b0703dd1be4c\") " Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982790 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982849 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982876 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982945 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htk4w\" (UniqueName: \"kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.982978 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvkrd\" (UniqueName: \"kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.983068 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ll4n\" (UniqueName: \"kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: 
\"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.984150 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.985047 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:08 crc kubenswrapper[5021]: I0121 15:46:08.993208 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n" (OuterVolumeSpecName: "kube-api-access-5rp5n") pod "a0bd89c3-8fac-437e-9be8-b0703dd1be4c" (UID: "a0bd89c3-8fac-437e-9be8-b0703dd1be4c"). InnerVolumeSpecName "kube-api-access-5rp5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.005529 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ll4n\" (UniqueName: \"kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n\") pod \"nova-cell0-1670-account-create-update-rpbpq\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.011689 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvkrd\" (UniqueName: \"kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd\") pod \"nova-cell1-db-create-ztp85\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.084619 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htk4w\" (UniqueName: \"kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.085116 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.085174 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rp5n\" (UniqueName: \"kubernetes.io/projected/a0bd89c3-8fac-437e-9be8-b0703dd1be4c-kube-api-access-5rp5n\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.085860 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.105621 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htk4w\" (UniqueName: \"kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w\") pod \"nova-cell1-4f73-account-create-update-tcc9q\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.156788 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.212193 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.233875 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.338168 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-674b76c99f-vlmjb" podUID="c5cd9226-205d-4888-bad3-0da3ccfd61c0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: i/o timeout" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.343441 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-pzb9h"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.700547 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a0bd89c3-8fac-437e-9be8-b0703dd1be4c","Type":"ContainerDied","Data":"6b38cbb0c3d4bb4a828fc533cdc6efb17a8297d506b00dcac4e172012e73516d"} Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.700648 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.701091 5021 scope.go:117] "RemoveContainer" containerID="63a56a368dac364949819e8a1463f6184ccb6e30760a5a57a555cda751e7913c" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.757358 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.769263 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.818567 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-gd72h"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.830160 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-48eb-account-create-update-dcvwx"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.858346 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.863288 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.867690 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.872218 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.886453 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.912200 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.920341 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.920570 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:09 crc kubenswrapper[5021]: I0121 15:46:09.921946 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkzv5\" (UniqueName: \"kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.023776 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.023836 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.023939 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.024018 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkzv5\" 
(UniqueName: \"kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.032960 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.042663 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.042725 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.048312 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkzv5\" (UniqueName: \"kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5\") pod \"kube-state-metrics-0\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.322871 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.435528 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ztp85"] Jan 21 15:46:10 crc kubenswrapper[5021]: W0121 15:46:10.445212 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ae2bbb9_1d77_4394_b7a9_30c9ce199ffe.slice/crio-5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1 WatchSource:0}: Error finding container 5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1: Status 404 returned error can't find the container with id 5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1 Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.536646 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-tcc9q"] Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.654215 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-rpbpq"] Jan 21 15:46:10 crc kubenswrapper[5021]: W0121 15:46:10.677238 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4601017a_6691_4486_8bc0_e469284ec4e2.slice/crio-42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8 WatchSource:0}: Error finding container 42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8: Status 404 returned error can't find the container with id 42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8 Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.721890 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-dcvwx" event={"ID":"02027ced-d71f-420b-9f26-10adafa52051","Type":"ContainerStarted","Data":"ce41f6ce53276698af88c81dec88a1a28dfd43d7f45ad89f9badad8d1a417043"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.722004 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-dcvwx" event={"ID":"02027ced-d71f-420b-9f26-10adafa52051","Type":"ContainerStarted","Data":"cb7b1dc0367c608507c7438d14336816d4ce1f9140aefc76b6441aef88bcd994"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.727881 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" event={"ID":"4601017a-6691-4486-8bc0-e469284ec4e2","Type":"ContainerStarted","Data":"42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.730617 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" event={"ID":"c693b8ce-a34d-4a4b-b6e8-1495764299d7","Type":"ContainerStarted","Data":"326ddb45aa3d66c1a0c123988af6037807156944aa956cc7406492d1dcd2abe9"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.735508 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ztp85" event={"ID":"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe","Type":"ContainerStarted","Data":"5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.763608 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-48eb-account-create-update-dcvwx" podStartSLOduration=2.763584683 podStartE2EDuration="2.763584683s" 
podCreationTimestamp="2026-01-21 15:46:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:10.736639249 +0000 UTC m=+1312.271753138" watchObservedRunningTime="2026-01-21 15:46:10.763584683 +0000 UTC m=+1312.298698572" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.774457 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0bd89c3-8fac-437e-9be8-b0703dd1be4c" path="/var/lib/kubelet/pods/a0bd89c3-8fac-437e-9be8-b0703dd1be4c/volumes" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.775898 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gd72h" event={"ID":"a580a9c4-cecd-4c8c-b928-8e8d5a686f60","Type":"ContainerStarted","Data":"fc1d0ae289408d89e328587bc4c294526c1aecae909b14446688e9bf7d5d6da7"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.776099 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gd72h" event={"ID":"a580a9c4-cecd-4c8c-b928-8e8d5a686f60","Type":"ContainerStarted","Data":"63b20b4302ee8394511ee41680a421772e2997c387af422ae586d3b5cf3e3fcc"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.779664 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzb9h" event={"ID":"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e","Type":"ContainerStarted","Data":"71256733ff9fecce00f3c2e2b2d62f457f32792c57777df4cc9c78ee8ade1f0a"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.782543 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzb9h" event={"ID":"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e","Type":"ContainerStarted","Data":"9f8fdf9da76793322f3cf985642387deae177f07d8cf63c9d9b8424aa8b7817f"} Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.803619 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-gd72h" podStartSLOduration=2.803380518 podStartE2EDuration="2.803380518s" podCreationTimestamp="2026-01-21 15:46:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:10.79834123 +0000 UTC m=+1312.333455119" watchObservedRunningTime="2026-01-21 15:46:10.803380518 +0000 UTC m=+1312.338494407" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.823313 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-pzb9h" podStartSLOduration=2.82329105 podStartE2EDuration="2.82329105s" podCreationTimestamp="2026-01-21 15:46:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:10.820855723 +0000 UTC m=+1312.355969612" watchObservedRunningTime="2026-01-21 15:46:10.82329105 +0000 UTC m=+1312.358404939" Jan 21 15:46:10 crc kubenswrapper[5021]: I0121 15:46:10.971225 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:46:11 crc kubenswrapper[5021]: W0121 15:46:11.073697 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5073fbf8_f2ef_49e7_8b07_d90b1822b414.slice/crio-c3611b4d67192fc20c2bc7d87bf6b3c5c011452c57850853d104e33d6f79dcea WatchSource:0}: Error finding container c3611b4d67192fc20c2bc7d87bf6b3c5c011452c57850853d104e33d6f79dcea: Status 404 returned error can't 
find the container with id c3611b4d67192fc20c2bc7d87bf6b3c5c011452c57850853d104e33d6f79dcea Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.610665 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.790149 5021 generic.go:334] "Generic (PLEG): container finished" podID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerID="f4109119afa6957e21234d569dc42804535d59145205c52b5588d6791cfc42fc" exitCode=0 Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.790252 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" event={"ID":"c693b8ce-a34d-4a4b-b6e8-1495764299d7","Type":"ContainerDied","Data":"f4109119afa6957e21234d569dc42804535d59145205c52b5588d6791cfc42fc"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.792120 5021 generic.go:334] "Generic (PLEG): container finished" podID="3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" containerID="fbb10e55d2fb1f75b2a4d2ea3d962bd38837ca0992ed6675c730962b996dda86" exitCode=0 Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.792181 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ztp85" event={"ID":"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe","Type":"ContainerDied","Data":"fbb10e55d2fb1f75b2a4d2ea3d962bd38837ca0992ed6675c730962b996dda86"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.796528 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerStarted","Data":"2b7db18b225d27b56cb2c1ae1c5b1775fbab7c4fb30a05a478ca3a7f1b86cf0b"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.796813 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.799210 5021 generic.go:334] "Generic (PLEG): container finished" podID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" containerID="fc1d0ae289408d89e328587bc4c294526c1aecae909b14446688e9bf7d5d6da7" exitCode=0 Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.799297 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-gd72h" event={"ID":"a580a9c4-cecd-4c8c-b928-8e8d5a686f60","Type":"ContainerDied","Data":"fc1d0ae289408d89e328587bc4c294526c1aecae909b14446688e9bf7d5d6da7"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.801428 5021 generic.go:334] "Generic (PLEG): container finished" podID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerID="71256733ff9fecce00f3c2e2b2d62f457f32792c57777df4cc9c78ee8ade1f0a" exitCode=0 Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.801463 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzb9h" event={"ID":"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e","Type":"ContainerDied","Data":"71256733ff9fecce00f3c2e2b2d62f457f32792c57777df4cc9c78ee8ade1f0a"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.802632 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5073fbf8-f2ef-49e7-8b07-d90b1822b414","Type":"ContainerStarted","Data":"c3611b4d67192fc20c2bc7d87bf6b3c5c011452c57850853d104e33d6f79dcea"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.807766 5021 generic.go:334] "Generic (PLEG): container finished" podID="02027ced-d71f-420b-9f26-10adafa52051" containerID="ce41f6ce53276698af88c81dec88a1a28dfd43d7f45ad89f9badad8d1a417043" exitCode=0 Jan 21 
15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.807852 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-dcvwx" event={"ID":"02027ced-d71f-420b-9f26-10adafa52051","Type":"ContainerDied","Data":"ce41f6ce53276698af88c81dec88a1a28dfd43d7f45ad89f9badad8d1a417043"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.810511 5021 generic.go:334] "Generic (PLEG): container finished" podID="4601017a-6691-4486-8bc0-e469284ec4e2" containerID="0d24f64a0731db8d85873c1c608acc97fb5bbe26445980ffbd17a463bbba03f3" exitCode=0 Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.810546 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" event={"ID":"4601017a-6691-4486-8bc0-e469284ec4e2","Type":"ContainerDied","Data":"0d24f64a0731db8d85873c1c608acc97fb5bbe26445980ffbd17a463bbba03f3"} Jan 21 15:46:11 crc kubenswrapper[5021]: I0121 15:46:11.825045 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.363625693 podStartE2EDuration="7.825026985s" podCreationTimestamp="2026-01-21 15:46:04 +0000 UTC" firstStartedPulling="2026-01-21 15:46:05.938377879 +0000 UTC m=+1307.473491768" lastFinishedPulling="2026-01-21 15:46:10.399779171 +0000 UTC m=+1311.934893060" observedRunningTime="2026-01-21 15:46:11.824663764 +0000 UTC m=+1313.359777653" watchObservedRunningTime="2026-01-21 15:46:11.825026985 +0000 UTC m=+1313.360140884" Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.357535 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.357602 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.822796 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5073fbf8-f2ef-49e7-8b07-d90b1822b414","Type":"ContainerStarted","Data":"81466562175ec89583498029b73cadcc8d26846a5fbb385a68626b0ba993a0c2"} Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.823581 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-central-agent" containerID="cri-o://55f5963d406f7c0bc674fef059a34f5ad473080ab784a93b9d67f2ae188bff0b" gracePeriod=30 Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.823664 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-notification-agent" containerID="cri-o://8ab34c578f1bfcdc54710774b53e185a546dbbbff8dc990f73a12bb3f2043672" gracePeriod=30 Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.823667 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="proxy-httpd" 
containerID="cri-o://2b7db18b225d27b56cb2c1ae1c5b1775fbab7c4fb30a05a478ca3a7f1b86cf0b" gracePeriod=30 Jan 21 15:46:12 crc kubenswrapper[5021]: I0121 15:46:12.823673 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="sg-core" containerID="cri-o://7c3e16e55e662254318a8fe6f2cfeb3a8b68be5d8afa30f23faccc2de9884c49" gracePeriod=30 Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.354163 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.410469 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts\") pod \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.410555 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvkrd\" (UniqueName: \"kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd\") pod \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\" (UID: \"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.413795 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" (UID: "3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.420279 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd" (OuterVolumeSpecName: "kube-api-access-fvkrd") pod "3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" (UID: "3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe"). InnerVolumeSpecName "kube-api-access-fvkrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.421495 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvkrd\" (UniqueName: \"kubernetes.io/projected/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-kube-api-access-fvkrd\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.421531 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.424953 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.453307 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.465465 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.500351 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.512416 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.522699 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ll4n\" (UniqueName: \"kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n\") pod \"4601017a-6691-4486-8bc0-e469284ec4e2\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.522750 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts\") pod \"4601017a-6691-4486-8bc0-e469284ec4e2\" (UID: \"4601017a-6691-4486-8bc0-e469284ec4e2\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.522812 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts\") pod \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.522843 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts\") pod \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.522860 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htk4w\" (UniqueName: \"kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w\") pod \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\" (UID: \"c693b8ce-a34d-4a4b-b6e8-1495764299d7\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.523011 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42mb4\" (UniqueName: \"kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4\") pod \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\" (UID: \"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.524115 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" (UID: "971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.524234 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c693b8ce-a34d-4a4b-b6e8-1495764299d7" (UID: "c693b8ce-a34d-4a4b-b6e8-1495764299d7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.524538 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4601017a-6691-4486-8bc0-e469284ec4e2" (UID: "4601017a-6691-4486-8bc0-e469284ec4e2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.528928 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w" (OuterVolumeSpecName: "kube-api-access-htk4w") pod "c693b8ce-a34d-4a4b-b6e8-1495764299d7" (UID: "c693b8ce-a34d-4a4b-b6e8-1495764299d7"). InnerVolumeSpecName "kube-api-access-htk4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.529025 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n" (OuterVolumeSpecName: "kube-api-access-2ll4n") pod "4601017a-6691-4486-8bc0-e469284ec4e2" (UID: "4601017a-6691-4486-8bc0-e469284ec4e2"). InnerVolumeSpecName "kube-api-access-2ll4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.530794 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4" (OuterVolumeSpecName: "kube-api-access-42mb4") pod "971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" (UID: "971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e"). InnerVolumeSpecName "kube-api-access-42mb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.624572 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cv6j7\" (UniqueName: \"kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7\") pod \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.624874 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts\") pod \"02027ced-d71f-420b-9f26-10adafa52051\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.625089 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfsvn\" (UniqueName: \"kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn\") pod \"02027ced-d71f-420b-9f26-10adafa52051\" (UID: \"02027ced-d71f-420b-9f26-10adafa52051\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.625817 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "02027ced-d71f-420b-9f26-10adafa52051" (UID: "02027ced-d71f-420b-9f26-10adafa52051"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.633094 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts\") pod \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\" (UID: \"a580a9c4-cecd-4c8c-b928-8e8d5a686f60\") " Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634610 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a580a9c4-cecd-4c8c-b928-8e8d5a686f60" (UID: "a580a9c4-cecd-4c8c-b928-8e8d5a686f60"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634653 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42mb4\" (UniqueName: \"kubernetes.io/projected/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-kube-api-access-42mb4\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634808 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02027ced-d71f-420b-9f26-10adafa52051-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634829 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ll4n\" (UniqueName: \"kubernetes.io/projected/4601017a-6691-4486-8bc0-e469284ec4e2-kube-api-access-2ll4n\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634847 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4601017a-6691-4486-8bc0-e469284ec4e2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634862 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634874 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c693b8ce-a34d-4a4b-b6e8-1495764299d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.634886 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htk4w\" (UniqueName: \"kubernetes.io/projected/c693b8ce-a34d-4a4b-b6e8-1495764299d7-kube-api-access-htk4w\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.636266 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn" (OuterVolumeSpecName: "kube-api-access-vfsvn") pod "02027ced-d71f-420b-9f26-10adafa52051" (UID: "02027ced-d71f-420b-9f26-10adafa52051"). InnerVolumeSpecName "kube-api-access-vfsvn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.644268 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7" (OuterVolumeSpecName: "kube-api-access-cv6j7") pod "a580a9c4-cecd-4c8c-b928-8e8d5a686f60" (UID: "a580a9c4-cecd-4c8c-b928-8e8d5a686f60"). InnerVolumeSpecName "kube-api-access-cv6j7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.736774 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.736846 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cv6j7\" (UniqueName: \"kubernetes.io/projected/a580a9c4-cecd-4c8c-b928-8e8d5a686f60-kube-api-access-cv6j7\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.736874 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfsvn\" (UniqueName: \"kubernetes.io/projected/02027ced-d71f-420b-9f26-10adafa52051-kube-api-access-vfsvn\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.833241 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzb9h" event={"ID":"971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e","Type":"ContainerDied","Data":"9f8fdf9da76793322f3cf985642387deae177f07d8cf63c9d9b8424aa8b7817f"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.834420 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f8fdf9da76793322f3cf985642387deae177f07d8cf63c9d9b8424aa8b7817f" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.833269 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-pzb9h" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.835389 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-dcvwx" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.835398 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-dcvwx" event={"ID":"02027ced-d71f-420b-9f26-10adafa52051","Type":"ContainerDied","Data":"cb7b1dc0367c608507c7438d14336816d4ce1f9140aefc76b6441aef88bcd994"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.835474 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb7b1dc0367c608507c7438d14336816d4ce1f9140aefc76b6441aef88bcd994" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.837361 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" event={"ID":"4601017a-6691-4486-8bc0-e469284ec4e2","Type":"ContainerDied","Data":"42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.837400 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-rpbpq" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.837411 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42837972c99a225a45444c65b7bb1693c138a8b2aa3a5d539d0e4214ffb284f8" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.841161 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" event={"ID":"c693b8ce-a34d-4a4b-b6e8-1495764299d7","Type":"ContainerDied","Data":"326ddb45aa3d66c1a0c123988af6037807156944aa956cc7406492d1dcd2abe9"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.841234 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="326ddb45aa3d66c1a0c123988af6037807156944aa956cc7406492d1dcd2abe9" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.841189 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-tcc9q" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.842541 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ztp85" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.842533 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ztp85" event={"ID":"3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe","Type":"ContainerDied","Data":"5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.842685 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5388f69de759b9905b92cfe23a1edfcfe86a8622233cc5a12b045778e9e4f1d1" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851481 5021 generic.go:334] "Generic (PLEG): container finished" podID="f6160307-8450-4492-96b7-b09c826919db" containerID="2b7db18b225d27b56cb2c1ae1c5b1775fbab7c4fb30a05a478ca3a7f1b86cf0b" exitCode=0 Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851518 5021 generic.go:334] "Generic (PLEG): container finished" podID="f6160307-8450-4492-96b7-b09c826919db" containerID="7c3e16e55e662254318a8fe6f2cfeb3a8b68be5d8afa30f23faccc2de9884c49" exitCode=2 Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851531 5021 generic.go:334] "Generic (PLEG): container finished" podID="f6160307-8450-4492-96b7-b09c826919db" containerID="8ab34c578f1bfcdc54710774b53e185a546dbbbff8dc990f73a12bb3f2043672" exitCode=0 Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851582 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerDied","Data":"2b7db18b225d27b56cb2c1ae1c5b1775fbab7c4fb30a05a478ca3a7f1b86cf0b"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851615 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerDied","Data":"7c3e16e55e662254318a8fe6f2cfeb3a8b68be5d8afa30f23faccc2de9884c49"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.851629 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerDied","Data":"8ab34c578f1bfcdc54710774b53e185a546dbbbff8dc990f73a12bb3f2043672"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.855504 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-db-create-gd72h" event={"ID":"a580a9c4-cecd-4c8c-b928-8e8d5a686f60","Type":"ContainerDied","Data":"63b20b4302ee8394511ee41680a421772e2997c387af422ae586d3b5cf3e3fcc"} Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.855539 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63b20b4302ee8394511ee41680a421772e2997c387af422ae586d3b5cf3e3fcc" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.855577 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.855581 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-gd72h" Jan 21 15:46:13 crc kubenswrapper[5021]: I0121 15:46:13.887407 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=4.3174770989999995 podStartE2EDuration="4.887388248s" podCreationTimestamp="2026-01-21 15:46:09 +0000 UTC" firstStartedPulling="2026-01-21 15:46:11.076958581 +0000 UTC m=+1312.612072460" lastFinishedPulling="2026-01-21 15:46:11.64686934 +0000 UTC m=+1313.181983609" observedRunningTime="2026-01-21 15:46:13.886837083 +0000 UTC m=+1315.421950972" watchObservedRunningTime="2026-01-21 15:46:13.887388248 +0000 UTC m=+1315.422502137" Jan 21 15:46:14 crc kubenswrapper[5021]: I0121 15:46:14.250102 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:46:14 crc kubenswrapper[5021]: I0121 15:46:14.251053 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-log" containerID="cri-o://3b77e5e1aadd0eec3081d6d317d598f851b02c9638caed1693bb7b2641fb0c29" gracePeriod=30 Jan 21 15:46:14 crc kubenswrapper[5021]: I0121 15:46:14.251133 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-httpd" containerID="cri-o://06785eb5d86910068797d6bce294bb6783c169d8956563f0bc01666a1d00f6fd" gracePeriod=30 Jan 21 15:46:14 crc kubenswrapper[5021]: I0121 15:46:14.868735 5021 generic.go:334] "Generic (PLEG): container finished" podID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerID="3b77e5e1aadd0eec3081d6d317d598f851b02c9638caed1693bb7b2641fb0c29" exitCode=143 Jan 21 15:46:14 crc kubenswrapper[5021]: I0121 15:46:14.869461 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerDied","Data":"3b77e5e1aadd0eec3081d6d317d598f851b02c9638caed1693bb7b2641fb0c29"} Jan 21 15:46:15 crc kubenswrapper[5021]: I0121 15:46:15.921683 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:46:15 crc kubenswrapper[5021]: I0121 15:46:15.922271 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-log" containerID="cri-o://0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453" gracePeriod=30 Jan 21 15:46:15 crc kubenswrapper[5021]: I0121 15:46:15.922352 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-httpd" containerID="cri-o://69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d" gracePeriod=30 Jan 21 15:46:16 crc kubenswrapper[5021]: I0121 15:46:16.890990 5021 generic.go:334] "Generic (PLEG): container finished" podID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerID="0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453" exitCode=143 Jan 21 15:46:16 crc kubenswrapper[5021]: I0121 15:46:16.891103 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerDied","Data":"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453"} Jan 21 15:46:17 crc kubenswrapper[5021]: I0121 15:46:17.901893 5021 generic.go:334] "Generic (PLEG): container finished" podID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerID="06785eb5d86910068797d6bce294bb6783c169d8956563f0bc01666a1d00f6fd" exitCode=0 Jan 21 15:46:17 crc kubenswrapper[5021]: I0121 15:46:17.901948 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerDied","Data":"06785eb5d86910068797d6bce294bb6783c169d8956563f0bc01666a1d00f6fd"} Jan 21 15:46:17 crc kubenswrapper[5021]: I0121 15:46:17.905753 5021 generic.go:334] "Generic (PLEG): container finished" podID="f6160307-8450-4492-96b7-b09c826919db" containerID="55f5963d406f7c0bc674fef059a34f5ad473080ab784a93b9d67f2ae188bff0b" exitCode=0 Jan 21 15:46:17 crc kubenswrapper[5021]: I0121 15:46:17.905792 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerDied","Data":"55f5963d406f7c0bc674fef059a34f5ad473080ab784a93b9d67f2ae188bff0b"} Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.050214 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"] Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051379 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerName="mariadb-database-create" Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051394 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerName="mariadb-database-create" Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051406 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" containerName="mariadb-account-create-update" Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051413 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" containerName="mariadb-account-create-update" Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051427 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerName="mariadb-account-create-update" Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051435 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerName="mariadb-account-create-update" Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051452 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" containerName="mariadb-database-create" Jan 21 15:46:19 crc kubenswrapper[5021]: 
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.050214 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"]
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051379 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051394 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051406 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051413 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051427 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051435 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051452 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051459 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051479 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051486 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: E0121 15:46:19.051500 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02027ced-d71f-420b-9f26-10adafa52051" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051507 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="02027ced-d71f-420b-9f26-10adafa52051" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051701 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051718 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051732 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051744 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051753 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="02027ced-d71f-420b-9f26-10adafa52051" containerName="mariadb-account-create-update"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.051767 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" containerName="mariadb-database-create"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.052469 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.065926 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.068489 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hkbfs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.073435 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.075859 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"]
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.145024 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5l5p\" (UniqueName: \"kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.145193 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.145237 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.145318 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.247133 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.247220 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.247326 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5l5p\" (UniqueName: \"kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.255812 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.255858 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.258697 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.270955 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5l5p\" (UniqueName: \"kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p\") pod \"nova-cell0-conductor-db-sync-p4kbs\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.397373 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.404407 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.406254 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p4kbs"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452054 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452130 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452202 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452233 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452265 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452394 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452430 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452471 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452515 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452553 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8n6v\" (UniqueName: \"kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452580 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452604 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd\") pod \"f6160307-8450-4492-96b7-b09c826919db\" (UID: \"f6160307-8450-4492-96b7-b09c826919db\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452658 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22m72\" (UniqueName: \"kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452692 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.452747 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts\") pod \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\" (UID: \"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.454047 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.454522 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.455802 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs" (OuterVolumeSpecName: "logs") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.456426 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.472108 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts" (OuterVolumeSpecName: "scripts") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.477284 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72" (OuterVolumeSpecName: "kube-api-access-22m72") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "kube-api-access-22m72". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.477652 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.478296 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts" (OuterVolumeSpecName: "scripts") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.482118 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v" (OuterVolumeSpecName: "kube-api-access-c8n6v") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "kube-api-access-c8n6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.521700 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.535942 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555449 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-logs\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555486 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555496 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555504 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555514 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8n6v\" (UniqueName: \"kubernetes.io/projected/f6160307-8450-4492-96b7-b09c826919db-kube-api-access-c8n6v\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555523 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6160307-8450-4492-96b7-b09c826919db-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555531 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22m72\" (UniqueName: \"kubernetes.io/projected/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-kube-api-access-22m72\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555539 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555547 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555578 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.555587 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.558335 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.579318 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.590002 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.590106 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data" (OuterVolumeSpecName: "config-data") pod "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" (UID: "9b81a4e0-f0d5-4e03-b00f-f77dad44be9c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.631059 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data" (OuterVolumeSpecName: "config-data") pod "f6160307-8450-4492-96b7-b09c826919db" (UID: "f6160307-8450-4492-96b7-b09c826919db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.661092 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.661129 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.661183 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.661193 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6160307-8450-4492-96b7-b09c826919db-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.661203 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.699480 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762460 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft5j8\" (UniqueName: \"kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762556 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762594 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762717 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762743 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762810 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762843 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.762886 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle\") pod \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\" (UID: \"afc0c781-38b3-4ee6-ae9d-82d8649978cd\") "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.763506 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.764363 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs" (OuterVolumeSpecName: "logs") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.772391 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8" (OuterVolumeSpecName: "kube-api-access-ft5j8") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "kube-api-access-ft5j8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.772486 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts" (OuterVolumeSpecName: "scripts") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.772576 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.827650 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.840149 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data" (OuterVolumeSpecName: "config-data") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.849104 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "afc0c781-38b3-4ee6-ae9d-82d8649978cd" (UID: "afc0c781-38b3-4ee6-ae9d-82d8649978cd"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865372 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865410 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865424 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865456 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865468 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865480 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc0c781-38b3-4ee6-ae9d-82d8649978cd-logs\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865490 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc0c781-38b3-4ee6-ae9d-82d8649978cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.865500 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft5j8\" (UniqueName: \"kubernetes.io/projected/afc0c781-38b3-4ee6-ae9d-82d8649978cd-kube-api-access-ft5j8\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.902122 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.942483 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9b81a4e0-f0d5-4e03-b00f-f77dad44be9c","Type":"ContainerDied","Data":"5b59b99a76c5b195a185fd7112eb954c9a0dcbaaa1e77c98082d68ebfe46d48d"}
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.942766 5021 scope.go:117] "RemoveContainer" containerID="06785eb5d86910068797d6bce294bb6783c169d8956563f0bc01666a1d00f6fd"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.943063 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.949256 5021 generic.go:334] "Generic (PLEG): container finished" podID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerID="69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d" exitCode=0
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.949346 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerDied","Data":"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d"}
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.949383 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"afc0c781-38b3-4ee6-ae9d-82d8649978cd","Type":"ContainerDied","Data":"430a5f5e2132c63531aa5e0f7b4202e0c48b4cda0f203ee49ff436a3a55736a3"}
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.949493 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.967596 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.996222 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6160307-8450-4492-96b7-b09c826919db","Type":"ContainerDied","Data":"32c70c6ea9957022ef82fce0b8811c081959fd09b585ffdd8ac1146857cce47c"}
Jan 21 15:46:19 crc kubenswrapper[5021]: I0121 15:46:19.996270 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.023299 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.048052 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.076062 5021 scope.go:117] "RemoveContainer" containerID="3b77e5e1aadd0eec3081d6d317d598f851b02c9638caed1693bb7b2641fb0c29"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082266 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082644 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-central-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082662 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-central-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082679 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082686 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082700 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="proxy-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082706 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="proxy-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082724 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="sg-core"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082730 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="sg-core"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082741 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082750 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082765 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082771 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082779 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-notification-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082785 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-notification-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.082797 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082803 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082979 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="proxy-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.082992 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-central-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083003 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083017 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083029 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="ceilometer-notification-agent"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083036 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6160307-8450-4492-96b7-b09c826919db" containerName="sg-core"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083048 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" containerName="glance-httpd"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083055 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" containerName="glance-log"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.083936 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.087236 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.103298 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.103343 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d767q"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.103700 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.109545 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.140984 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.161130 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.173830 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.173919 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgmwm\" (UniqueName: \"kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.173947 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.173994 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.174027 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.174095 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.174121 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.174153 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.182246 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.188071 5021 scope.go:117] "RemoveContainer" containerID="69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.210486 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.257834 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.280107 5021 scope.go:117] "RemoveContainer" containerID="0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453"
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.300995 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.303142 5021 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.306359 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.306551 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307086 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307144 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307204 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307258 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307318 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgmwm\" (UniqueName: \"kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307353 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307389 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.307427 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.308762 5021 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.309752 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.325734 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.341130 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.341953 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.343427 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.355524 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.357071 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.366968 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.370699 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.380271 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgmwm\" (UniqueName: \"kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.386441 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.389847 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " 
pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.390887 5021 scope.go:117] "RemoveContainer" containerID="69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.392691 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.395518 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d\": container with ID starting with 69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d not found: ID does not exist" containerID="69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.395556 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d"} err="failed to get container status \"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d\": rpc error: code = NotFound desc = could not find container \"69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d\": container with ID starting with 69c31b6122e5d3f83bed16119cc67b33a3a0a2d529b5fa15164c3ec270884e3d not found: ID does not exist" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.395580 5021 scope.go:117] "RemoveContainer" containerID="0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453" Jan 21 15:46:20 crc kubenswrapper[5021]: E0121 15:46:20.396552 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453\": container with ID starting with 0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453 not found: ID does not exist" containerID="0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.396574 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453"} err="failed to get container status \"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453\": rpc error: code = NotFound desc = could not find container \"0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453\": container with ID starting with 0e38878917aa131287482cb95e934b688fdc45f443e728fae07ac78db6e56453 not found: ID does not exist" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.396589 5021 scope.go:117] "RemoveContainer" containerID="2b7db18b225d27b56cb2c1ae1c5b1775fbab7c4fb30a05a478ca3a7f1b86cf0b" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.408830 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.408884 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.408935 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.408961 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.408989 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409023 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409082 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409117 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409150 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409331 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkvlq\" (UniqueName: \"kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409366 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409388 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409423 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409498 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409522 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8rs7\" (UniqueName: \"kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.409572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.411653 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") " pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.417817 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.421136 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515451 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8rs7\" (UniqueName: \"kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515505 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515576 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515610 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515631 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515654 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515681 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515708 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515742 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515812 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515841 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: 
I0121 15:46:20.515871 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515896 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkvlq\" (UniqueName: \"kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515939 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515959 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.515998 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.522570 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.523508 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.523840 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.526359 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.526996 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " 
pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.527078 5021 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.539855 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.542140 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.542502 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.543631 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.544720 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.549128 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkvlq\" (UniqueName: \"kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.550345 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.553187 5021 scope.go:117] "RemoveContainer" containerID="7c3e16e55e662254318a8fe6f2cfeb3a8b68be5d8afa30f23faccc2de9884c49" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.557560 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") " pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.565239 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8rs7\" (UniqueName: \"kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.581703 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.585289 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.608955 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " pod="openstack/glance-default-internal-api-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.641288 5021 scope.go:117] "RemoveContainer" containerID="8ab34c578f1bfcdc54710774b53e185a546dbbbff8dc990f73a12bb3f2043672" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.675458 5021 scope.go:117] "RemoveContainer" containerID="55f5963d406f7c0bc674fef059a34f5ad473080ab784a93b9d67f2ae188bff0b" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.759280 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b81a4e0-f0d5-4e03-b00f-f77dad44be9c" path="/var/lib/kubelet/pods/9b81a4e0-f0d5-4e03-b00f-f77dad44be9c/volumes" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.760157 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afc0c781-38b3-4ee6-ae9d-82d8649978cd" path="/var/lib/kubelet/pods/afc0c781-38b3-4ee6-ae9d-82d8649978cd/volumes" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.760775 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6160307-8450-4492-96b7-b09c826919db" path="/var/lib/kubelet/pods/f6160307-8450-4492-96b7-b09c826919db/volumes" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.823539 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.856786 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:46:20 crc kubenswrapper[5021]: I0121 15:46:20.904835 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:21 crc kubenswrapper[5021]: I0121 15:46:21.027167 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" event={"ID":"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a","Type":"ContainerStarted","Data":"8c79ec307f2d19e5a94f8bf0685cbf52336b76b80d5f22a299bd44f0b35d6723"} Jan 21 15:46:21 crc kubenswrapper[5021]: W0121 15:46:21.193755 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0415e622_e0cf_4097_865a_a0970f2acc07.slice/crio-549188ecab545a9dba7f8ce68626136542e660601cf4fc70bf56379729785aaa WatchSource:0}: Error finding container 549188ecab545a9dba7f8ce68626136542e660601cf4fc70bf56379729785aaa: Status 404 returned error can't find the container with id 549188ecab545a9dba7f8ce68626136542e660601cf4fc70bf56379729785aaa Jan 21 15:46:21 crc kubenswrapper[5021]: I0121 15:46:21.204954 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:46:21 crc kubenswrapper[5021]: I0121 15:46:21.345576 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:21 crc kubenswrapper[5021]: I0121 15:46:21.704411 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:46:22 crc kubenswrapper[5021]: I0121 15:46:22.041053 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerStarted","Data":"1ae36592e9f893a476a95f96ead49911be659b1292cd3c8a7d9f8d62e14a199f"} Jan 21 15:46:22 crc kubenswrapper[5021]: I0121 15:46:22.043637 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerStarted","Data":"549188ecab545a9dba7f8ce68626136542e660601cf4fc70bf56379729785aaa"} Jan 21 15:46:22 crc kubenswrapper[5021]: I0121 15:46:22.045582 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerStarted","Data":"76d465a0e5670e2e5c75f70f0e448f889c1854efb506c3af594373823736f312"} Jan 21 15:46:23 crc kubenswrapper[5021]: I0121 15:46:23.056056 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerStarted","Data":"d29d9648241688d22882bb1ef26e5b75f7e75ce105b478819e5cb0b36d9eaa34"} Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.071398 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerStarted","Data":"13e437facb9eed154cb3a1fc466a26799000944b7d4e3a944a35585bf94ce10b"} Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.074240 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerStarted","Data":"20a7c05d680426c518dcf812d8d7a9481aa09f5c574f8ae028a68e0fbb6c1a5e"} Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.836015 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-c795c5585-m9bzp" Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.900452 5021 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.900714 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5cc8cdbd96-42qdw" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-api" containerID="cri-o://7db78f313be2bf899060a39f4f6e01c74bfd6f22e83686e7cc735c19bd8e3c1b" gracePeriod=30 Jan 21 15:46:24 crc kubenswrapper[5021]: I0121 15:46:24.900767 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5cc8cdbd96-42qdw" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-httpd" containerID="cri-o://dae06be82c19848eba9aae53d94ac929368f48abdf5774d76ed75afee2f1baa6" gracePeriod=30 Jan 21 15:46:26 crc kubenswrapper[5021]: I0121 15:46:26.070585 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.070560136 podStartE2EDuration="7.070560136s" podCreationTimestamp="2026-01-21 15:46:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:25.110011484 +0000 UTC m=+1326.645125393" watchObservedRunningTime="2026-01-21 15:46:26.070560136 +0000 UTC m=+1327.605674025" Jan 21 15:46:26 crc kubenswrapper[5021]: I0121 15:46:26.072730 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:26 crc kubenswrapper[5021]: I0121 15:46:26.100787 5021 generic.go:334] "Generic (PLEG): container finished" podID="48140321-e331-4340-868b-d050bbfcbd92" containerID="dae06be82c19848eba9aae53d94ac929368f48abdf5774d76ed75afee2f1baa6" exitCode=0 Jan 21 15:46:26 crc kubenswrapper[5021]: I0121 15:46:26.100840 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerDied","Data":"dae06be82c19848eba9aae53d94ac929368f48abdf5774d76ed75afee2f1baa6"} Jan 21 15:46:27 crc kubenswrapper[5021]: I0121 15:46:27.115251 5021 generic.go:334] "Generic (PLEG): container finished" podID="48140321-e331-4340-868b-d050bbfcbd92" containerID="7db78f313be2bf899060a39f4f6e01c74bfd6f22e83686e7cc735c19bd8e3c1b" exitCode=0 Jan 21 15:46:27 crc kubenswrapper[5021]: I0121 15:46:27.115295 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerDied","Data":"7db78f313be2bf899060a39f4f6e01c74bfd6f22e83686e7cc735c19bd8e3c1b"} Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.595195 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.595731 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.658646 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.666709 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.693722 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.854732 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle\") pod \"48140321-e331-4340-868b-d050bbfcbd92\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.854864 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config\") pod \"48140321-e331-4340-868b-d050bbfcbd92\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.855450 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs\") pod \"48140321-e331-4340-868b-d050bbfcbd92\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.855542 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlwcl\" (UniqueName: \"kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl\") pod \"48140321-e331-4340-868b-d050bbfcbd92\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.855588 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config\") pod \"48140321-e331-4340-868b-d050bbfcbd92\" (UID: \"48140321-e331-4340-868b-d050bbfcbd92\") " Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.917137 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl" (OuterVolumeSpecName: "kube-api-access-tlwcl") pod "48140321-e331-4340-868b-d050bbfcbd92" (UID: "48140321-e331-4340-868b-d050bbfcbd92"). InnerVolumeSpecName "kube-api-access-tlwcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.917781 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "48140321-e331-4340-868b-d050bbfcbd92" (UID: "48140321-e331-4340-868b-d050bbfcbd92"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.935264 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "48140321-e331-4340-868b-d050bbfcbd92" (UID: "48140321-e331-4340-868b-d050bbfcbd92"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.958895 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlwcl\" (UniqueName: \"kubernetes.io/projected/48140321-e331-4340-868b-d050bbfcbd92-kube-api-access-tlwcl\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.958962 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.958972 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:30 crc kubenswrapper[5021]: I0121 15:46:30.960572 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config" (OuterVolumeSpecName: "config") pod "48140321-e331-4340-868b-d050bbfcbd92" (UID: "48140321-e331-4340-868b-d050bbfcbd92"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.008366 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "48140321-e331-4340-868b-d050bbfcbd92" (UID: "48140321-e331-4340-868b-d050bbfcbd92"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.061629 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.061666 5021 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48140321-e331-4340-868b-d050bbfcbd92-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.156193 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cc8cdbd96-42qdw" event={"ID":"48140321-e331-4340-868b-d050bbfcbd92","Type":"ContainerDied","Data":"25155ad38b901827ea1851d388cf62d76c81ec29d7a1a1e7ddd2370c508ec43e"} Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.156246 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5cc8cdbd96-42qdw" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.156272 5021 scope.go:117] "RemoveContainer" containerID="dae06be82c19848eba9aae53d94ac929368f48abdf5774d76ed75afee2f1baa6" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.159364 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerStarted","Data":"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870"} Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.162207 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" event={"ID":"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a","Type":"ContainerStarted","Data":"940f863573ce6eb57381c2fbbca658d251917ef9b33253b0a3d16577b3809021"} Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.164321 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.164360 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.185655 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" podStartSLOduration=1.915065663 podStartE2EDuration="12.185639709s" podCreationTimestamp="2026-01-21 15:46:19 +0000 UTC" firstStartedPulling="2026-01-21 15:46:20.111561809 +0000 UTC m=+1321.646675708" lastFinishedPulling="2026-01-21 15:46:30.382135865 +0000 UTC m=+1331.917249754" observedRunningTime="2026-01-21 15:46:31.179314187 +0000 UTC m=+1332.714428076" watchObservedRunningTime="2026-01-21 15:46:31.185639709 +0000 UTC m=+1332.720753598" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.207730 5021 scope.go:117] "RemoveContainer" containerID="7db78f313be2bf899060a39f4f6e01c74bfd6f22e83686e7cc735c19bd8e3c1b" Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.212985 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:46:31 crc kubenswrapper[5021]: I0121 15:46:31.220301 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5cc8cdbd96-42qdw"] Jan 21 15:46:32 crc kubenswrapper[5021]: I0121 15:46:32.175279 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerStarted","Data":"6b8005ac26237642083ae21d321912d31a41bc53e8cc8714923e3a28c95e2695"} Jan 21 15:46:32 crc kubenswrapper[5021]: I0121 15:46:32.180648 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerStarted","Data":"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce"} Jan 21 15:46:32 crc kubenswrapper[5021]: I0121 15:46:32.204305 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.204279554 podStartE2EDuration="12.204279554s" podCreationTimestamp="2026-01-21 15:46:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:32.199588526 +0000 UTC m=+1333.734702425" watchObservedRunningTime="2026-01-21 15:46:32.204279554 +0000 UTC m=+1333.739393453" Jan 21 
15:46:32 crc kubenswrapper[5021]: I0121 15:46:32.751802 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48140321-e331-4340-868b-d050bbfcbd92" path="/var/lib/kubelet/pods/48140321-e331-4340-868b-d050bbfcbd92/volumes" Jan 21 15:46:33 crc kubenswrapper[5021]: I0121 15:46:33.193021 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:46:33 crc kubenswrapper[5021]: I0121 15:46:33.193798 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 21 15:46:33 crc kubenswrapper[5021]: I0121 15:46:33.193003 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerStarted","Data":"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7"} Jan 21 15:46:33 crc kubenswrapper[5021]: I0121 15:46:33.466310 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 21 15:46:33 crc kubenswrapper[5021]: I0121 15:46:33.474866 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.212824 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-central-agent" containerID="cri-o://38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870" gracePeriod=30 Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.213360 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerStarted","Data":"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071"} Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.213457 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="sg-core" containerID="cri-o://acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7" gracePeriod=30 Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.213558 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="proxy-httpd" containerID="cri-o://e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071" gracePeriod=30 Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.213569 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-notification-agent" containerID="cri-o://273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce" gracePeriod=30 Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.213569 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:46:35 crc kubenswrapper[5021]: I0121 15:46:35.241361 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.542134286 podStartE2EDuration="15.241337636s" podCreationTimestamp="2026-01-21 15:46:20 +0000 UTC" firstStartedPulling="2026-01-21 15:46:21.355670757 +0000 UTC m=+1322.890784646" lastFinishedPulling="2026-01-21 15:46:34.054874107 +0000 UTC m=+1335.589987996" observedRunningTime="2026-01-21 15:46:35.233728788 +0000 
UTC m=+1336.768842687" watchObservedRunningTime="2026-01-21 15:46:35.241337636 +0000 UTC m=+1336.776451525" Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235134 5021 generic.go:334] "Generic (PLEG): container finished" podID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerID="e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071" exitCode=0 Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235438 5021 generic.go:334] "Generic (PLEG): container finished" podID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerID="acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7" exitCode=2 Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235450 5021 generic.go:334] "Generic (PLEG): container finished" podID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerID="273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce" exitCode=0 Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235212 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerDied","Data":"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071"} Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235494 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerDied","Data":"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7"} Jan 21 15:46:37 crc kubenswrapper[5021]: I0121 15:46:37.235513 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerDied","Data":"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce"} Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.666286 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846678 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846750 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846818 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846858 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846887 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.846960 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847002 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847051 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkvlq\" (UniqueName: \"kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq\") pod \"646e47e3-7614-4a60-99e0-ce4718cece7c\" (UID: \"646e47e3-7614-4a60-99e0-ce4718cece7c\") "
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847340 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847441 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847938 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.847957 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/646e47e3-7614-4a60-99e0-ce4718cece7c-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.852957 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq" (OuterVolumeSpecName: "kube-api-access-xkvlq") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "kube-api-access-xkvlq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.857118 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts" (OuterVolumeSpecName: "scripts") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.877188 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.902364 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.906108 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.907785 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.932089 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.939359 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.949504 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data" (OuterVolumeSpecName: "config-data") pod "646e47e3-7614-4a60-99e0-ce4718cece7c" (UID: "646e47e3-7614-4a60-99e0-ce4718cece7c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950062 5021 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950098 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950224 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950279 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkvlq\" (UniqueName: \"kubernetes.io/projected/646e47e3-7614-4a60-99e0-ce4718cece7c-kube-api-access-xkvlq\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950292 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.950300 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646e47e3-7614-4a60-99e0-ce4718cece7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:40 crc kubenswrapper[5021]: I0121 15:46:40.970968 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.271422 5021 generic.go:334] "Generic (PLEG): container finished" podID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerID="38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870" exitCode=0 Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.271458 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerDied","Data":"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870"} Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.271835 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"646e47e3-7614-4a60-99e0-ce4718cece7c","Type":"ContainerDied","Data":"1ae36592e9f893a476a95f96ead49911be659b1292cd3c8a7d9f8d62e14a199f"} Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.271868 5021 scope.go:117] "RemoveContainer" containerID="e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.271510 5021 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.272677 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.272717 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.303428 5021 scope.go:117] "RemoveContainer" containerID="acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.317596 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.324611 5021 scope.go:117] "RemoveContainer" containerID="273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.335110 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352170 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352599 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="proxy-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352616 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="proxy-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352633 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352642 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352651 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-api" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352659 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-api" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352670 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-central-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352676 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-central-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352705 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-notification-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352712 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-notification-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.352725 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="sg-core" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352732 5021 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="sg-core" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352732 5021 scope.go:117] "RemoveContainer" containerID="38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352959 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="proxy-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352978 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-notification-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.352994 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-api" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.353004 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="sg-core" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.353022 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="48140321-e331-4340-868b-d050bbfcbd92" containerName="neutron-httpd" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.353038 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" containerName="ceilometer-central-agent" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.355039 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.358390 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.358701 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.358860 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.361390 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.411623 5021 scope.go:117] "RemoveContainer" containerID="e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.412165 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071\": container with ID starting with e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071 not found: ID does not exist" containerID="e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.412235 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071"} err="failed to get container status \"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071\": rpc error: code = NotFound desc = could not find container \"e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071\": container with ID starting with e7d1bb7490a600cd78c72dadfaa7445c761707fe536eaf8193b301d12feb0071 not found: ID does not 
exist" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.412260 5021 scope.go:117] "RemoveContainer" containerID="acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.413076 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7\": container with ID starting with acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7 not found: ID does not exist" containerID="acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.413119 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7"} err="failed to get container status \"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7\": rpc error: code = NotFound desc = could not find container \"acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7\": container with ID starting with acde083c895f30effbc0221fb870f953f24b608790b3118596a4993f04b0e7a7 not found: ID does not exist" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.413156 5021 scope.go:117] "RemoveContainer" containerID="273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.414061 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce\": container with ID starting with 273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce not found: ID does not exist" containerID="273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.414084 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce"} err="failed to get container status \"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce\": rpc error: code = NotFound desc = could not find container \"273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce\": container with ID starting with 273b50d7685af307bdffcb4f3c4c0bcc6e1631dc55252d6f917afa9497109dce not found: ID does not exist" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.414102 5021 scope.go:117] "RemoveContainer" containerID="38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870" Jan 21 15:46:41 crc kubenswrapper[5021]: E0121 15:46:41.414428 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870\": container with ID starting with 38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870 not found: ID does not exist" containerID="38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.414451 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870"} err="failed to get container status \"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870\": rpc error: code = NotFound desc = could not find container 
\"38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870\": container with ID starting with 38e31dab8c6d3626d08ad3eb6db0af1be035bcfbe6f93c70f3998b7726f9d870 not found: ID does not exist" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.460605 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.460658 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.460688 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x58p2\" (UniqueName: \"kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.460720 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.460741 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.461032 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.461149 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.461295 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-scripts\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563442 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " 
pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563541 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x58p2\" (UniqueName: \"kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563594 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563627 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563670 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563713 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563794 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-scripts\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.563899 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.564229 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.564441 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.568826 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0" Jan 21 15:46:41 crc 
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.569502 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0"
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.569664 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0"
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.576751 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0"
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.580069 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x58p2\" (UniqueName: \"kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2\") pod \"ceilometer-0\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") " pod="openstack/ceilometer-0"
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.716136 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:46:41 crc kubenswrapper[5021]: I0121 15:46:41.916901 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:46:42 crc kubenswrapper[5021]: I0121 15:46:42.160752 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:46:42 crc kubenswrapper[5021]: W0121 15:46:42.162862 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a35604b_95f9_44e2_87d1_27ae8b8ca41c.slice/crio-04229b6f8504862a6851be658ccad012e548cdd37176ef1fc9e6c93138410be6 WatchSource:0}: Error finding container 04229b6f8504862a6851be658ccad012e548cdd37176ef1fc9e6c93138410be6: Status 404 returned error can't find the container with id 04229b6f8504862a6851be658ccad012e548cdd37176ef1fc9e6c93138410be6
Jan 21 15:46:42 crc kubenswrapper[5021]: I0121 15:46:42.285845 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"04229b6f8504862a6851be658ccad012e548cdd37176ef1fc9e6c93138410be6"}
Jan 21 15:46:42 crc kubenswrapper[5021]: I0121 15:46:42.357680 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:46:42 crc kubenswrapper[5021]: I0121 15:46:42.357753 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:46:42 crc kubenswrapper[5021]: I0121 15:46:42.750770 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="646e47e3-7614-4a60-99e0-ce4718cece7c" path="/var/lib/kubelet/pods/646e47e3-7614-4a60-99e0-ce4718cece7c/volumes"
Jan 21 15:46:43 crc kubenswrapper[5021]: I0121 15:46:43.296640 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035"}
Jan 21 15:46:43 crc kubenswrapper[5021]: I0121 15:46:43.496198 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:43 crc kubenswrapper[5021]: I0121 15:46:43.496310 5021 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 21 15:46:43 crc kubenswrapper[5021]: I0121 15:46:43.518109 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 21 15:46:46 crc kubenswrapper[5021]: I0121 15:46:46.325777 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c"}
Jan 21 15:46:49 crc kubenswrapper[5021]: I0121 15:46:49.353616 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9"}
event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9"} Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.364642 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerStarted","Data":"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d"} Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.365264 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.364961 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-central-agent" containerID="cri-o://b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035" gracePeriod=30 Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.365468 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="proxy-httpd" containerID="cri-o://86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d" gracePeriod=30 Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.366034 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-notification-agent" containerID="cri-o://bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c" gracePeriod=30 Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.366094 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="sg-core" containerID="cri-o://ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9" gracePeriod=30 Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.368606 5021 generic.go:334] "Generic (PLEG): container finished" podID="9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" containerID="940f863573ce6eb57381c2fbbca658d251917ef9b33253b0a3d16577b3809021" exitCode=0 Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.368639 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" event={"ID":"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a","Type":"ContainerDied","Data":"940f863573ce6eb57381c2fbbca658d251917ef9b33253b0a3d16577b3809021"} Jan 21 15:46:50 crc kubenswrapper[5021]: I0121 15:46:50.392368 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.699865432 podStartE2EDuration="9.392345611s" podCreationTimestamp="2026-01-21 15:46:41 +0000 UTC" firstStartedPulling="2026-01-21 15:46:42.165201973 +0000 UTC m=+1343.700315862" lastFinishedPulling="2026-01-21 15:46:49.857682152 +0000 UTC m=+1351.392796041" observedRunningTime="2026-01-21 15:46:50.388011873 +0000 UTC m=+1351.923125772" watchObservedRunningTime="2026-01-21 15:46:50.392345611 +0000 UTC m=+1351.927459500" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.378799 5021 generic.go:334] "Generic (PLEG): container finished" podID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerID="86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d" exitCode=0 Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.379175 5021 generic.go:334] "Generic (PLEG): 
container finished" podID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerID="ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9" exitCode=2 Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.379189 5021 generic.go:334] "Generic (PLEG): container finished" podID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerID="bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c" exitCode=0 Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.378869 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerDied","Data":"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d"} Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.379290 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerDied","Data":"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9"} Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.379314 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerDied","Data":"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c"} Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.717358 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.843638 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5l5p\" (UniqueName: \"kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p\") pod \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.843722 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts\") pod \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.844013 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data\") pod \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.844151 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-combined-ca-bundle\") pod \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\" (UID: \"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a\") " Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.850189 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p" (OuterVolumeSpecName: "kube-api-access-r5l5p") pod "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" (UID: "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a"). InnerVolumeSpecName "kube-api-access-r5l5p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.854049 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts" (OuterVolumeSpecName: "scripts") pod "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" (UID: "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.872332 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" (UID: "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.877162 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data" (OuterVolumeSpecName: "config-data") pod "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" (UID: "9a0ad139-d743-47ca-aecd-ee8a7ff59a7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.947682 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.947729 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5l5p\" (UniqueName: \"kubernetes.io/projected/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-kube-api-access-r5l5p\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.947744 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:51 crc kubenswrapper[5021]: I0121 15:46:51.947755 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.390627 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" event={"ID":"9a0ad139-d743-47ca-aecd-ee8a7ff59a7a","Type":"ContainerDied","Data":"8c79ec307f2d19e5a94f8bf0685cbf52336b76b80d5f22a299bd44f0b35d6723"} Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.391154 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c79ec307f2d19e5a94f8bf0685cbf52336b76b80d5f22a299bd44f0b35d6723" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.390676 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-p4kbs" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.544699 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 21 15:46:52 crc kubenswrapper[5021]: E0121 15:46:52.545469 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" containerName="nova-cell0-conductor-db-sync" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.545483 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" containerName="nova-cell0-conductor-db-sync" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.545655 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" containerName="nova-cell0-conductor-db-sync" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.546230 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.549118 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hkbfs" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.549341 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.571735 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.658694 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.658796 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85vfh\" (UniqueName: \"kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.659005 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.761110 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85vfh\" (UniqueName: \"kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.761179 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:52 crc 
Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.765693 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0"
Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.769160 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0"
Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.784557 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85vfh\" (UniqueName: \"kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh\") pod \"nova-cell0-conductor-0\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " pod="openstack/nova-cell0-conductor-0"
Jan 21 15:46:52 crc kubenswrapper[5021]: I0121 15:46:52.874250 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 21 15:46:53 crc kubenswrapper[5021]: I0121 15:46:53.311941 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 21 15:46:53 crc kubenswrapper[5021]: I0121 15:46:53.400783 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e56d063f-18e5-49af-8bfc-892629a34e88","Type":"ContainerStarted","Data":"fd862b058cdb045584f4bc5c6ec6b0cd3b7643c4dc4aa55c188f531ac7f1d401"}
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.316551 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390189 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390330 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390400 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390455 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x58p2\" (UniqueName: \"kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390493 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390516 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390537 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-scripts\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.390632 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd\") pod \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\" (UID: \"3a35604b-95f9-44e2-87d1-27ae8b8ca41c\") "
Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.391469 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.391512 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.396796 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-scripts" (OuterVolumeSpecName: "scripts") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.411656 5021 generic.go:334] "Generic (PLEG): container finished" podID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerID="b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035" exitCode=0 Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.411757 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerDied","Data":"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035"} Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.411829 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3a35604b-95f9-44e2-87d1-27ae8b8ca41c","Type":"ContainerDied","Data":"04229b6f8504862a6851be658ccad012e548cdd37176ef1fc9e6c93138410be6"} Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.411852 5021 scope.go:117] "RemoveContainer" containerID="86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.411781 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.418674 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2" (OuterVolumeSpecName: "kube-api-access-x58p2") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "kube-api-access-x58p2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.418763 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e56d063f-18e5-49af-8bfc-892629a34e88","Type":"ContainerStarted","Data":"fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2"} Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.419397 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.422877 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.444561 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.444937 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.444897951 podStartE2EDuration="2.444897951s" podCreationTimestamp="2026-01-21 15:46:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:46:54.439608537 +0000 UTC m=+1355.974722426" watchObservedRunningTime="2026-01-21 15:46:54.444897951 +0000 UTC m=+1355.980011840" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.490960 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.495661 5021 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.495787 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496123 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496209 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496270 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496337 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x58p2\" (UniqueName: \"kubernetes.io/projected/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-kube-api-access-x58p2\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496404 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.496530 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data" (OuterVolumeSpecName: "config-data") pod "3a35604b-95f9-44e2-87d1-27ae8b8ca41c" (UID: "3a35604b-95f9-44e2-87d1-27ae8b8ca41c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.519892 5021 scope.go:117] "RemoveContainer" containerID="ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.553365 5021 scope.go:117] "RemoveContainer" containerID="bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.575717 5021 scope.go:117] "RemoveContainer" containerID="b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.594035 5021 scope.go:117] "RemoveContainer" containerID="86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.594563 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d\": container with ID starting with 86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d not found: ID does not exist" containerID="86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.594686 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d"} err="failed to get container status \"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d\": rpc error: code = NotFound desc = could not find container \"86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d\": container with ID starting with 86dfd4af1ee1fece879a5b0ceeaca15ceeedb95e18fba18e56584000e1cf6e1d not found: ID does not exist" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.594786 5021 scope.go:117] "RemoveContainer" containerID="ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.595248 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9\": container with ID starting with ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9 not found: ID does not exist" containerID="ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.595280 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9"} err="failed to get container status \"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9\": rpc error: code = NotFound desc = could not find container \"ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9\": container with ID starting with ec33d6895f7d1a1d03aa9aa3d0ef80ce923ba6820f9c6b4fc5a65f12fb2e6cc9 not found: ID does not exist" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.595299 5021 scope.go:117] "RemoveContainer" containerID="bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.595583 5021 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c\": container with ID starting with bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c not found: ID does not exist" containerID="bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.595713 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c"} err="failed to get container status \"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c\": rpc error: code = NotFound desc = could not find container \"bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c\": container with ID starting with bc150f827af5b1450ac6bc9aff5a088d2ce8f7bd65b7139a9f81b71e2224584c not found: ID does not exist" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.595797 5021 scope.go:117] "RemoveContainer" containerID="b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.596320 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035\": container with ID starting with b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035 not found: ID does not exist" containerID="b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.596401 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035"} err="failed to get container status \"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035\": rpc error: code = NotFound desc = could not find container \"b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035\": container with ID starting with b6d6af3abe4e3cc720705d7881fbdac97c46933d42262fdf4d4d40ade1b37035 not found: ID does not exist" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.597559 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a35604b-95f9-44e2-87d1-27ae8b8ca41c-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.768175 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.771436 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.788558 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.789270 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-central-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789288 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-central-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.789309 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" 
containerName="ceilometer-notification-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789316 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-notification-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.789349 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="proxy-httpd" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789357 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="proxy-httpd" Jan 21 15:46:54 crc kubenswrapper[5021]: E0121 15:46:54.789370 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="sg-core" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789377 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="sg-core" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789575 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="proxy-httpd" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789593 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-notification-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789610 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="sg-core" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.789618 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" containerName="ceilometer-central-agent" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.791614 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.796512 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.796688 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.796932 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.797022 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.902190 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.902263 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.902537 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.902659 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.902942 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.903054 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.903127 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsv96\" (UniqueName: \"kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:54 crc kubenswrapper[5021]: I0121 15:46:54.903270 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.005715 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.005782 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.005821 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsv96\" (UniqueName: \"kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.005895 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.005983 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.006011 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.006057 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.006079 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.006553 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.008355 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.012942 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.013133 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.014155 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.016140 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.017563 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.025169 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsv96\" (UniqueName: \"kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96\") pod \"ceilometer-0\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.132053 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:46:55 crc kubenswrapper[5021]: W0121 15:46:55.640619 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf83da032_e822_4650_8896_8fc86b81a081.slice/crio-7e4217a362bd65fe55a98cf6c1da2cec8ee9d6d7a200be101e1426d765f9ece9 WatchSource:0}: Error finding container 7e4217a362bd65fe55a98cf6c1da2cec8ee9d6d7a200be101e1426d765f9ece9: Status 404 returned error can't find the container with id 7e4217a362bd65fe55a98cf6c1da2cec8ee9d6d7a200be101e1426d765f9ece9 Jan 21 15:46:55 crc kubenswrapper[5021]: I0121 15:46:55.654036 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:46:56 crc kubenswrapper[5021]: I0121 15:46:56.441078 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerStarted","Data":"7e4217a362bd65fe55a98cf6c1da2cec8ee9d6d7a200be101e1426d765f9ece9"} Jan 21 15:46:56 crc kubenswrapper[5021]: I0121 15:46:56.747923 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a35604b-95f9-44e2-87d1-27ae8b8ca41c" path="/var/lib/kubelet/pods/3a35604b-95f9-44e2-87d1-27ae8b8ca41c/volumes" Jan 21 15:46:57 crc kubenswrapper[5021]: I0121 15:46:57.453497 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerStarted","Data":"2fe27286fc696de2912f3ca1c8f3f80477579502971375d7979b1752fffe3ab5"} Jan 21 15:46:58 crc kubenswrapper[5021]: I0121 15:46:58.465725 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerStarted","Data":"a4d66d922163c42defd3f72405093645235a7a8b3072cd9967db3ad881dcf7ef"} Jan 21 15:46:58 crc kubenswrapper[5021]: I0121 15:46:58.466061 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerStarted","Data":"b0180964b73b70183e501237d00aaaf411fec880e4b2d7aab7f930ac6d298dc2"} Jan 21 15:47:00 crc kubenswrapper[5021]: I0121 15:47:00.486991 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerStarted","Data":"df02703a4c529283cc77edfe3ac44c3e5122d4f268c215d2e025ed62f64682aa"} Jan 21 15:47:00 crc kubenswrapper[5021]: I0121 15:47:00.487591 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:47:00 crc kubenswrapper[5021]: I0121 15:47:00.515986 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.546811103 podStartE2EDuration="6.515960391s" podCreationTimestamp="2026-01-21 15:46:54 +0000 UTC" firstStartedPulling="2026-01-21 15:46:55.644049715 +0000 UTC m=+1357.179163604" lastFinishedPulling="2026-01-21 15:46:59.613198983 +0000 UTC m=+1361.148312892" observedRunningTime="2026-01-21 15:47:00.506965567 +0000 UTC m=+1362.042079456" watchObservedRunningTime="2026-01-21 15:47:00.515960391 +0000 UTC m=+1362.051074280" Jan 21 15:47:02 crc kubenswrapper[5021]: I0121 15:47:02.901438 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.320038 5021 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-cell0-cell-mapping-rl28t"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.321136 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.323492 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.326980 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.334379 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rl28t"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.461470 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.461572 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.461814 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.462037 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrt9h\" (UniqueName: \"kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.483083 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.484449 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.486459 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.514334 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.562263 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.564934 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565261 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565307 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565356 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565409 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565499 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565568 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrt9h\" (UniqueName: \"kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.565602 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwxmr\" (UniqueName: \"kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.571415 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.575748 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.585702 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.587329 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.594226 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.648591 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrt9h\" (UniqueName: \"kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h\") pod \"nova-cell0-cell-mapping-rl28t\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.649105 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.668589 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.668752 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.668817 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.668895 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws68h\" (UniqueName: \"kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.668985 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.669062 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwxmr\" (UniqueName: \"kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr\") pod 
\"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.671466 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.673219 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.678385 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.681799 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.685553 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.722073 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwxmr\" (UniqueName: \"kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr\") pod \"nova-scheduler-0\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.722699 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.772970 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773093 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773150 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773192 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773234 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws68h\" (UniqueName: 
\"kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773256 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.773279 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zgj8\" (UniqueName: \"kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.780562 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.790130 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.810321 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.817670 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.827472 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.827622 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.840757 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.843289 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws68h\" (UniqueName: \"kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h\") pod \"nova-cell1-novncproxy-0\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.852239 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.852778 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.861115 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.863776 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.875618 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.875691 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.875734 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.875757 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zgj8\" (UniqueName: \"kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.875793 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkh7x\" (UniqueName: \"kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.876233 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.876389 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.876468 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.876714 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.884384 
5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.894179 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.948689 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zgj8\" (UniqueName: \"kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8\") pod \"nova-metadata-0\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " pod="openstack/nova-metadata-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.988826 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkh7x\" (UniqueName: \"kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989438 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989546 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989645 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989684 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989759 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.989845 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb\") pod 
\"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.990766 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.990804 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.991246 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjrwc\" (UniqueName: \"kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.991387 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.995496 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:03 crc kubenswrapper[5021]: I0121 15:47:03.997734 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.005715 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkh7x\" (UniqueName: \"kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x\") pod \"nova-api-0\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " pod="openstack/nova-api-0" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.093948 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.094006 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjrwc\" (UniqueName: \"kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 
15:47:04.094136 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.094184 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.094852 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.094979 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.096707 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.098725 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.099229 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.102686 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.111128 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.121918 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjrwc\" (UniqueName: 
\"kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc\") pod \"dnsmasq-dns-865f5d856f-4ggfx\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.182572 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.204966 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.255547 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.348234 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.381162 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rl28t"] Jan 21 15:47:04 crc kubenswrapper[5021]: W0121 15:47:04.464340 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e81a613_3013_46ca_9964_ad6c7deea2b2.slice/crio-f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033 WatchSource:0}: Error finding container f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033: Status 404 returned error can't find the container with id f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033 Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.537518 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6","Type":"ContainerStarted","Data":"4da318be56b7c9839f147da390683bb6eaebaba937150896aed6878b5db23a67"} Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.539847 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rl28t" event={"ID":"8e81a613-3013-46ca-9964-ad6c7deea2b2","Type":"ContainerStarted","Data":"f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033"} Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.631114 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.659261 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-s49kl"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.662177 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.669619 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.669643 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.676226 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-s49kl"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.685683 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.808009 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff4fq\" (UniqueName: \"kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.808083 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.808132 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.808180 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.909803 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff4fq\" (UniqueName: \"kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.909981 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.910110 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: 
\"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.910232 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.919630 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.920428 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.929646 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.944863 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff4fq\" (UniqueName: \"kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq\") pod \"nova-cell1-conductor-db-sync-s49kl\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.966833 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:04 crc kubenswrapper[5021]: I0121 15:47:04.987100 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.111046 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.553564 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerStarted","Data":"d5aa5f424e01843e6ed9e1bf87921aa94a5799427dd13aaa31a1d97cdcf8fc5c"} Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.559870 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerStarted","Data":"d2769a56c1fd9346b53309dbd592b331b72468d30b542d83da95caa81ea9fb5b"} Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.559946 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerStarted","Data":"f4918ddd3eeba0042bbd928ec5446ddd04d3de55f5e7cfa81ff90b4964898209"} Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.562017 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rl28t" event={"ID":"8e81a613-3013-46ca-9964-ad6c7deea2b2","Type":"ContainerStarted","Data":"43652315ff48ea4407b52df2509f36ccd3219420f681b4fe2028c844c527758b"} Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.563710 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"06e71668-be48-4b4a-b4ae-c793c0d0aef7","Type":"ContainerStarted","Data":"b64b62b36a657886e8a0e33b87ce86ed70076069cf0359799e420595646303a6"} Jan 21 15:47:05 crc kubenswrapper[5021]: I0121 15:47:05.566583 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerStarted","Data":"b6ab7534af6dc7c0a976f703586a49a1788f359824bfc93f479f7e7c1754047a"} Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.267291 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-s49kl"] Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.592310 5021 generic.go:334] "Generic (PLEG): container finished" podID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerID="d2769a56c1fd9346b53309dbd592b331b72468d30b542d83da95caa81ea9fb5b" exitCode=0 Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.592508 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerDied","Data":"d2769a56c1fd9346b53309dbd592b331b72468d30b542d83da95caa81ea9fb5b"} Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.597187 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-s49kl" event={"ID":"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2","Type":"ContainerStarted","Data":"8ffd7c56590e0ef552ab69c79953dfb064b804430a4f5bc577f323b3dfcea70a"} Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.597373 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-s49kl" event={"ID":"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2","Type":"ContainerStarted","Data":"ef831429e7a06bedefffca02b858664950295b723bf6b2e1ae9eeb28644b9ac6"} Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.639644 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-s49kl" podStartSLOduration=2.639625476 
podStartE2EDuration="2.639625476s" podCreationTimestamp="2026-01-21 15:47:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:06.633437587 +0000 UTC m=+1368.168551496" watchObservedRunningTime="2026-01-21 15:47:06.639625476 +0000 UTC m=+1368.174739365" Jan 21 15:47:06 crc kubenswrapper[5021]: I0121 15:47:06.653649 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-rl28t" podStartSLOduration=3.653625197 podStartE2EDuration="3.653625197s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:06.651258652 +0000 UTC m=+1368.186372561" watchObservedRunningTime="2026-01-21 15:47:06.653625197 +0000 UTC m=+1368.188739106" Jan 21 15:47:07 crc kubenswrapper[5021]: I0121 15:47:07.469229 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:07 crc kubenswrapper[5021]: I0121 15:47:07.482946 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:07 crc kubenswrapper[5021]: I0121 15:47:07.614281 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerStarted","Data":"5ba36e3a2aa67b9c5a478ad80a2d8b5733c4b9d33eb64319eff2f705b846786a"} Jan 21 15:47:07 crc kubenswrapper[5021]: I0121 15:47:07.614675 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:07 crc kubenswrapper[5021]: I0121 15:47:07.640658 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" podStartSLOduration=4.640637781 podStartE2EDuration="4.640637781s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:07.637759552 +0000 UTC m=+1369.172873451" watchObservedRunningTime="2026-01-21 15:47:07.640637781 +0000 UTC m=+1369.175751670" Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.667783 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"06e71668-be48-4b4a-b4ae-c793c0d0aef7","Type":"ContainerStarted","Data":"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016"} Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.668477 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016" gracePeriod=30 Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.675294 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerStarted","Data":"dcfe942c3451df06114fe2aea80a5cb338ac982f02ee8c71bb63af1ee6be3421"} Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.679839 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerStarted","Data":"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579"} Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.683784 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6","Type":"ContainerStarted","Data":"4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032"} Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.707501 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.111085747 podStartE2EDuration="7.707476504s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="2026-01-21 15:47:04.657428516 +0000 UTC m=+1366.192542405" lastFinishedPulling="2026-01-21 15:47:10.253819273 +0000 UTC m=+1371.788933162" observedRunningTime="2026-01-21 15:47:10.702651123 +0000 UTC m=+1372.237765012" watchObservedRunningTime="2026-01-21 15:47:10.707476504 +0000 UTC m=+1372.242590393" Jan 21 15:47:10 crc kubenswrapper[5021]: I0121 15:47:10.751740 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.96696865 podStartE2EDuration="7.75171562s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="2026-01-21 15:47:04.468366194 +0000 UTC m=+1366.003480083" lastFinishedPulling="2026-01-21 15:47:10.253113164 +0000 UTC m=+1371.788227053" observedRunningTime="2026-01-21 15:47:10.721835155 +0000 UTC m=+1372.256949044" watchObservedRunningTime="2026-01-21 15:47:10.75171562 +0000 UTC m=+1372.286829509" Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.697075 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerStarted","Data":"35fa4cfb2d915c278691d3c17171057404da130c39d64afcaafbbffb53141c5f"} Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.703633 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerStarted","Data":"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0"} Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.703646 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-log" containerID="cri-o://90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" gracePeriod=30 Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.703683 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-metadata" containerID="cri-o://9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" gracePeriod=30 Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.732785 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.442518214 podStartE2EDuration="8.73276233s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="2026-01-21 15:47:04.964260696 +0000 UTC m=+1366.499374585" lastFinishedPulling="2026-01-21 15:47:10.254504812 +0000 UTC m=+1371.789618701" observedRunningTime="2026-01-21 15:47:11.727478007 +0000 UTC m=+1373.262591896" watchObservedRunningTime="2026-01-21 
15:47:11.73276233 +0000 UTC m=+1373.267876219" Jan 21 15:47:11 crc kubenswrapper[5021]: I0121 15:47:11.753820 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.176548688 podStartE2EDuration="8.753799083s" podCreationTimestamp="2026-01-21 15:47:03 +0000 UTC" firstStartedPulling="2026-01-21 15:47:04.704838568 +0000 UTC m=+1366.239952457" lastFinishedPulling="2026-01-21 15:47:10.282088963 +0000 UTC m=+1371.817202852" observedRunningTime="2026-01-21 15:47:11.751147691 +0000 UTC m=+1373.286261590" watchObservedRunningTime="2026-01-21 15:47:11.753799083 +0000 UTC m=+1373.288912972" Jan 21 15:47:11 crc kubenswrapper[5021]: E0121 15:47:11.949882 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b51ffc_6822_44c8_9f24_597a6616b0a8.slice/crio-90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b51ffc_6822_44c8_9f24_597a6616b0a8.slice/crio-conmon-9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b51ffc_6822_44c8_9f24_597a6616b0a8.slice/crio-conmon-90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579.scope\": RecentStats: unable to find data in memory cache]" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.322101 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.357609 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.357674 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.357729 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.358646 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.358711 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583" gracePeriod=600 Jan 21 15:47:12 crc 
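Here the machine-config-daemon liveness probe gets connection refused on 127.0.0.1:8798, so the kubelet marks the container unhealthy, logs that it will be restarted, and kills it with the pod's termination grace period (600s) before the restart that appears shortly below. A probe shaped like the one being run, sketched as the corev1 structure; only the host, path, and port come from the log, while the period and failure threshold are assumptions:

    package example

    import (
            corev1 "k8s.io/api/core/v1"
            "k8s.io/apimachinery/pkg/util/intstr"
    )

    // livenessProbe mirrors the endpoint the kubelet is probing above.
    // PeriodSeconds and FailureThreshold are assumed, not read from the log.
    var livenessProbe = &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                    HTTPGet: &corev1.HTTPGetAction{
                            Host: "127.0.0.1",
                            Path: "/health",
                            Port: intstr.FromInt(8798),
                    },
            },
            PeriodSeconds:    10,
            FailureThreshold: 3,
    }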
kubenswrapper[5021]: I0121 15:47:12.378153 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs\") pod \"62b51ffc-6822-44c8-9f24-597a6616b0a8\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.378274 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle\") pod \"62b51ffc-6822-44c8-9f24-597a6616b0a8\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.378434 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data\") pod \"62b51ffc-6822-44c8-9f24-597a6616b0a8\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.378536 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zgj8\" (UniqueName: \"kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8\") pod \"62b51ffc-6822-44c8-9f24-597a6616b0a8\" (UID: \"62b51ffc-6822-44c8-9f24-597a6616b0a8\") " Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.379803 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs" (OuterVolumeSpecName: "logs") pod "62b51ffc-6822-44c8-9f24-597a6616b0a8" (UID: "62b51ffc-6822-44c8-9f24-597a6616b0a8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.384368 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8" (OuterVolumeSpecName: "kube-api-access-4zgj8") pod "62b51ffc-6822-44c8-9f24-597a6616b0a8" (UID: "62b51ffc-6822-44c8-9f24-597a6616b0a8"). InnerVolumeSpecName "kube-api-access-4zgj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.407318 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62b51ffc-6822-44c8-9f24-597a6616b0a8" (UID: "62b51ffc-6822-44c8-9f24-597a6616b0a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.416372 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data" (OuterVolumeSpecName: "config-data") pod "62b51ffc-6822-44c8-9f24-597a6616b0a8" (UID: "62b51ffc-6822-44c8-9f24-597a6616b0a8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.481172 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zgj8\" (UniqueName: \"kubernetes.io/projected/62b51ffc-6822-44c8-9f24-597a6616b0a8-kube-api-access-4zgj8\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.481217 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/62b51ffc-6822-44c8-9f24-597a6616b0a8-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.481233 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.481244 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b51ffc-6822-44c8-9f24-597a6616b0a8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.727631 5021 generic.go:334] "Generic (PLEG): container finished" podID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerID="9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" exitCode=0 Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.728009 5021 generic.go:334] "Generic (PLEG): container finished" podID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerID="90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" exitCode=143 Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.727705 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerDied","Data":"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0"} Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.727819 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.728119 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerDied","Data":"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579"} Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.728158 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"62b51ffc-6822-44c8-9f24-597a6616b0a8","Type":"ContainerDied","Data":"d5aa5f424e01843e6ed9e1bf87921aa94a5799427dd13aaa31a1d97cdcf8fc5c"} Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.728196 5021 scope.go:117] "RemoveContainer" containerID="9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.733934 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583"} Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.733894 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583" exitCode=0 Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.773239 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.797778 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.801442 5021 scope.go:117] "RemoveContainer" containerID="90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.819038 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:12 crc kubenswrapper[5021]: E0121 15:47:12.819562 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-metadata" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.819589 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-metadata" Jan 21 15:47:12 crc kubenswrapper[5021]: E0121 15:47:12.819606 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-log" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.819616 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-log" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.819846 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-log" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.819880 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" containerName="nova-metadata-metadata" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.820942 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.830259 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.830268 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.844473 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.845631 5021 scope.go:117] "RemoveContainer" containerID="9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" Jan 21 15:47:12 crc kubenswrapper[5021]: E0121 15:47:12.846020 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0\": container with ID starting with 9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0 not found: ID does not exist" containerID="9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.846059 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0"} err="failed to get container status \"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0\": rpc error: code = NotFound desc = could not find container \"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0\": container with ID starting with 9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0 not found: ID does not exist" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.846365 5021 scope.go:117] "RemoveContainer" containerID="90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" Jan 21 15:47:12 crc kubenswrapper[5021]: E0121 15:47:12.848081 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579\": container with ID starting with 90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579 not found: ID does not exist" containerID="90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.848120 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579"} err="failed to get container status \"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579\": rpc error: code = NotFound desc = could not find container \"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579\": container with ID starting with 90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579 not found: ID does not exist" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.848152 5021 scope.go:117] "RemoveContainer" containerID="9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.855113 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0"} err="failed to get container status \"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0\": rpc error: 
code = NotFound desc = could not find container \"9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0\": container with ID starting with 9792713d3376694be3b25803514193e233fcf814fc4e00c0aa328737414a6ec0 not found: ID does not exist" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.855168 5021 scope.go:117] "RemoveContainer" containerID="90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.855549 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579"} err="failed to get container status \"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579\": rpc error: code = NotFound desc = could not find container \"90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579\": container with ID starting with 90d9fa934c8662354e02c72954b300751a52d4ec47a74de36fbb50e344bf6579 not found: ID does not exist" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.855591 5021 scope.go:117] "RemoveContainer" containerID="3a78b8f8661ff4b02409ce688af67e4727c9123ec4900a181065acc3a089426c" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.907169 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29f6c\" (UniqueName: \"kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.907303 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.907336 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.907411 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:12 crc kubenswrapper[5021]: I0121 15:47:12.907457 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.009641 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.009711 5021 
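The NotFound errors above are benign: RemoveContainer already deleted the containers, so the follow-up CRI ContainerStatus calls fail with gRPC NotFound and the kubelet treats the deletion as already complete. A sketch of that check against the CRI runtime client; the helper name is ours:

    package example

    import (
            "context"

            "google.golang.org/grpc/codes"
            "google.golang.org/grpc/status"
            runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    // containerGone reports whether a ContainerStatus failure just means the
    // container was already removed, the situation the kubelet hits above
    // after deleting nova-metadata-0's containers.
    func containerGone(ctx context.Context, rt runtimeapi.RuntimeServiceClient, id string) (bool, error) {
            _, err := rt.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{ContainerId: id})
            if status.Code(err) == codes.NotFound {
                    return true, nil
            }
            return false, err
    }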
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29f6c\" (UniqueName: \"kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.009776 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.009798 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.009868 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.010611 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.018688 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.027635 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.032433 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29f6c\" (UniqueName: \"kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.044265 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.145385 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.751745 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8"} Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.788693 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.818674 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.818735 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.857649 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:13 crc kubenswrapper[5021]: I0121 15:47:13.859604 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.206346 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.206886 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.257084 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.336252 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.336525 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="dnsmasq-dns" containerID="cri-o://812d4a087958b8c409ebffe2d9d27c71091c65e0167a82c9a44196c8fc4e94bf" gracePeriod=10 Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.751998 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62b51ffc-6822-44c8-9f24-597a6616b0a8" path="/var/lib/kubelet/pods/62b51ffc-6822-44c8-9f24-597a6616b0a8/volumes" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.781476 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerStarted","Data":"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8"} Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.781530 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerStarted","Data":"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de"} Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.781542 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerStarted","Data":"d7c54ecd930b413c8c8f39e435e1c7e43ccc58c6cef4bae9ac82a897d78810d5"} Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.792868 5021 generic.go:334] "Generic (PLEG): container finished" 
podID="8e81a613-3013-46ca-9964-ad6c7deea2b2" containerID="43652315ff48ea4407b52df2509f36ccd3219420f681b4fe2028c844c527758b" exitCode=0 Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.793024 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rl28t" event={"ID":"8e81a613-3013-46ca-9964-ad6c7deea2b2","Type":"ContainerDied","Data":"43652315ff48ea4407b52df2509f36ccd3219420f681b4fe2028c844c527758b"} Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.802117 5021 generic.go:334] "Generic (PLEG): container finished" podID="a74c962f-f04a-4b18-b50b-4546528776b5" containerID="812d4a087958b8c409ebffe2d9d27c71091c65e0167a82c9a44196c8fc4e94bf" exitCode=0 Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.802228 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" event={"ID":"a74c962f-f04a-4b18-b50b-4546528776b5","Type":"ContainerDied","Data":"812d4a087958b8c409ebffe2d9d27c71091c65e0167a82c9a44196c8fc4e94bf"} Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.817445 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.8174202680000002 podStartE2EDuration="2.817420268s" podCreationTimestamp="2026-01-21 15:47:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:14.807741926 +0000 UTC m=+1376.342855825" watchObservedRunningTime="2026-01-21 15:47:14.817420268 +0000 UTC m=+1376.352534157" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.840153 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 21 15:47:14 crc kubenswrapper[5021]: I0121 15:47:14.918618 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064211 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxj54\" (UniqueName: \"kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064274 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064311 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064376 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064398 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.064427 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0\") pod \"a74c962f-f04a-4b18-b50b-4546528776b5\" (UID: \"a74c962f-f04a-4b18-b50b-4546528776b5\") " Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.076705 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54" (OuterVolumeSpecName: "kube-api-access-pxj54") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "kube-api-access-pxj54". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.112810 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.128339 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.130146 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.143334 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config" (OuterVolumeSpecName: "config") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.166425 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxj54\" (UniqueName: \"kubernetes.io/projected/a74c962f-f04a-4b18-b50b-4546528776b5-kube-api-access-pxj54\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.166463 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.166472 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.166483 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.166492 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.186211 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a74c962f-f04a-4b18-b50b-4546528776b5" (UID: "a74c962f-f04a-4b18-b50b-4546528776b5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.268807 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a74c962f-f04a-4b18-b50b-4546528776b5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.299113 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.299113 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.813105 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.815962 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-d5z29" event={"ID":"a74c962f-f04a-4b18-b50b-4546528776b5","Type":"ContainerDied","Data":"b008daae3f85d1848f6a64978c41509faf0ce07f07cd5d79be8db63acf1f837f"} Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.816003 5021 scope.go:117] "RemoveContainer" containerID="812d4a087958b8c409ebffe2d9d27c71091c65e0167a82c9a44196c8fc4e94bf" Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.858755 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:47:15 crc kubenswrapper[5021]: I0121 15:47:15.869827 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-d5z29"] Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.235123 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.246286 5021 scope.go:117] "RemoveContainer" containerID="89da4f4af68b745600e9479b39cba9531d6d6bb0cb36940b027129118b9991ca" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.290429 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts\") pod \"8e81a613-3013-46ca-9964-ad6c7deea2b2\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.290501 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data\") pod \"8e81a613-3013-46ca-9964-ad6c7deea2b2\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.290565 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle\") pod \"8e81a613-3013-46ca-9964-ad6c7deea2b2\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.290602 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrt9h\" (UniqueName: \"kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h\") pod \"8e81a613-3013-46ca-9964-ad6c7deea2b2\" (UID: \"8e81a613-3013-46ca-9964-ad6c7deea2b2\") " Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.298516 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts" (OuterVolumeSpecName: "scripts") pod "8e81a613-3013-46ca-9964-ad6c7deea2b2" (UID: "8e81a613-3013-46ca-9964-ad6c7deea2b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.300118 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h" (OuterVolumeSpecName: "kube-api-access-wrt9h") pod "8e81a613-3013-46ca-9964-ad6c7deea2b2" (UID: "8e81a613-3013-46ca-9964-ad6c7deea2b2"). InnerVolumeSpecName "kube-api-access-wrt9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.327126 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data" (OuterVolumeSpecName: "config-data") pod "8e81a613-3013-46ca-9964-ad6c7deea2b2" (UID: "8e81a613-3013-46ca-9964-ad6c7deea2b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.333068 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e81a613-3013-46ca-9964-ad6c7deea2b2" (UID: "8e81a613-3013-46ca-9964-ad6c7deea2b2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.393294 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.393338 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrt9h\" (UniqueName: \"kubernetes.io/projected/8e81a613-3013-46ca-9964-ad6c7deea2b2-kube-api-access-wrt9h\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.393357 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.393369 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e81a613-3013-46ca-9964-ad6c7deea2b2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.746668 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" path="/var/lib/kubelet/pods/a74c962f-f04a-4b18-b50b-4546528776b5/volumes" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.823113 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rl28t" event={"ID":"8e81a613-3013-46ca-9964-ad6c7deea2b2","Type":"ContainerDied","Data":"f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033"} Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.823176 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0079841e435c805be11ba8d0ab05b676707b515abbff1b71fe6b08f3cdba033" Jan 21 15:47:16 crc kubenswrapper[5021]: I0121 15:47:16.823247 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rl28t" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.022641 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.022992 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-log" containerID="cri-o://dcfe942c3451df06114fe2aea80a5cb338ac982f02ee8c71bb63af1ee6be3421" gracePeriod=30 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.023138 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-api" containerID="cri-o://35fa4cfb2d915c278691d3c17171057404da130c39d64afcaafbbffb53141c5f" gracePeriod=30 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.044131 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.045146 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerName="nova-scheduler-scheduler" containerID="cri-o://4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" gracePeriod=30 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.099328 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.099599 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-log" containerID="cri-o://c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" gracePeriod=30 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.099768 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-metadata" containerID="cri-o://28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" gracePeriod=30 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.791621 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849460 5021 generic.go:334] "Generic (PLEG): container finished" podID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerID="28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" exitCode=0 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849497 5021 generic.go:334] "Generic (PLEG): container finished" podID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerID="c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" exitCode=143 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849543 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerDied","Data":"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8"} Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849575 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerDied","Data":"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de"} Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849586 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a","Type":"ContainerDied","Data":"d7c54ecd930b413c8c8f39e435e1c7e43ccc58c6cef4bae9ac82a897d78810d5"} Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849604 5021 scope.go:117] "RemoveContainer" containerID="28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.849722 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.854026 5021 generic.go:334] "Generic (PLEG): container finished" podID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerID="dcfe942c3451df06114fe2aea80a5cb338ac982f02ee8c71bb63af1ee6be3421" exitCode=143 Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.854093 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerDied","Data":"dcfe942c3451df06114fe2aea80a5cb338ac982f02ee8c71bb63af1ee6be3421"} Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.874642 5021 scope.go:117] "RemoveContainer" containerID="c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.892785 5021 scope.go:117] "RemoveContainer" containerID="28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" Jan 21 15:47:17 crc kubenswrapper[5021]: E0121 15:47:17.893887 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8\": container with ID starting with 28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8 not found: ID does not exist" containerID="28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.893968 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8"} err="failed to get container status \"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8\": rpc error: code = 
NotFound desc = could not find container \"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8\": container with ID starting with 28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8 not found: ID does not exist" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.894009 5021 scope.go:117] "RemoveContainer" containerID="c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" Jan 21 15:47:17 crc kubenswrapper[5021]: E0121 15:47:17.894865 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de\": container with ID starting with c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de not found: ID does not exist" containerID="c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.895146 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de"} err="failed to get container status \"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de\": rpc error: code = NotFound desc = could not find container \"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de\": container with ID starting with c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de not found: ID does not exist" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.895186 5021 scope.go:117] "RemoveContainer" containerID="28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.898377 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8"} err="failed to get container status \"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8\": rpc error: code = NotFound desc = could not find container \"28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8\": container with ID starting with 28952a7009d06082733c73d40f0e3d0d9ada55bc09a79070ba71736edb539af8 not found: ID does not exist" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.898417 5021 scope.go:117] "RemoveContainer" containerID="c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.899138 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de"} err="failed to get container status \"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de\": rpc error: code = NotFound desc = could not find container \"c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de\": container with ID starting with c3fc96dcd97b23c71b1f75f2f2f13f8e9a52a0dfd75dfb68b862a479c0a382de not found: ID does not exist" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.923571 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs\") pod \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.923698 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs\") pod \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.923759 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data\") pod \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.923807 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29f6c\" (UniqueName: \"kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c\") pod \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.924835 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs" (OuterVolumeSpecName: "logs") pod "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" (UID: "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.925340 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle\") pod \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\" (UID: \"b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a\") " Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.926215 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.929614 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c" (OuterVolumeSpecName: "kube-api-access-29f6c") pod "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" (UID: "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a"). InnerVolumeSpecName "kube-api-access-29f6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.951288 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data" (OuterVolumeSpecName: "config-data") pod "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" (UID: "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.966342 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" (UID: "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:17 crc kubenswrapper[5021]: I0121 15:47:17.981346 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" (UID: "b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.030471 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.030516 5021 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.030535 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.030547 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29f6c\" (UniqueName: \"kubernetes.io/projected/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a-kube-api-access-29f6c\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.187042 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.198808 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210336 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.210774 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-metadata" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210791 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-metadata" Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.210811 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-log" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210818 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-log" Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.210839 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="init" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210846 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="init" Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.210858 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e81a613-3013-46ca-9964-ad6c7deea2b2" containerName="nova-manage" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210863 5021 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="8e81a613-3013-46ca-9964-ad6c7deea2b2" containerName="nova-manage" Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.210877 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="dnsmasq-dns" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.210882 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="dnsmasq-dns" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.211082 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-log" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.211092 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a74c962f-f04a-4b18-b50b-4546528776b5" containerName="dnsmasq-dns" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.211100 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" containerName="nova-metadata-metadata" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.211111 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e81a613-3013-46ca-9964-ad6c7deea2b2" containerName="nova-manage" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.212033 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.214146 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.214416 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.241010 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.336622 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.337569 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dc6z\" (UniqueName: \"kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.337741 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.337866 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.338056 5021 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.442296 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.442498 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.442544 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dc6z\" (UniqueName: \"kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.442601 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.442645 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.443423 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.450629 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.450786 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.450795 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 
21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.468783 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dc6z\" (UniqueName: \"kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z\") pod \"nova-metadata-0\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") " pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.538419 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.759217 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a" path="/var/lib/kubelet/pods/b04ddc42-1c8d-4e1c-b1bb-e2e868244a7a/volumes" Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.820480 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032 is running failed: container process not found" containerID="4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.821142 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032 is running failed: container process not found" containerID="4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.821648 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032 is running failed: container process not found" containerID="4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:47:18 crc kubenswrapper[5021]: E0121 15:47:18.821686 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerName="nova-scheduler-scheduler" Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.876237 5021 generic.go:334] "Generic (PLEG): container finished" podID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerID="4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" exitCode=0 Jan 21 15:47:18 crc kubenswrapper[5021]: I0121 15:47:18.876302 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6","Type":"ContainerDied","Data":"4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032"} Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.004637 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.177730 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.259288 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle\") pod \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.259371 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data\") pod \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.259402 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwxmr\" (UniqueName: \"kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr\") pod \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\" (UID: \"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6\") " Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.266073 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr" (OuterVolumeSpecName: "kube-api-access-rwxmr") pod "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" (UID: "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6"). InnerVolumeSpecName "kube-api-access-rwxmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.292949 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data" (OuterVolumeSpecName: "config-data") pod "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" (UID: "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.296536 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" (UID: "9b042c44-9a4c-4b67-8874-b04dc4f3c0e6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.364453 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.365160 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.365186 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwxmr\" (UniqueName: \"kubernetes.io/projected/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6-kube-api-access-rwxmr\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.889714 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b042c44-9a4c-4b67-8874-b04dc4f3c0e6","Type":"ContainerDied","Data":"4da318be56b7c9839f147da390683bb6eaebaba937150896aed6878b5db23a67"} Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.889760 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.889776 5021 scope.go:117] "RemoveContainer" containerID="4a33972bd4b3d62c3f5b12d4587ae188f26e3dd59ddff5f3984385d909529032" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.894193 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerStarted","Data":"2e86eac1f2449a41f78dad013fa386ac5187e07612bf88e22dc9295311e163b5"} Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.894232 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerStarted","Data":"d977c5fa121c11f08d5bce3d668c5a9e751eb49c2098d3b8f863976c805e8275"} Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.894242 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerStarted","Data":"64e8f5339afdedfee7db1007103ed997ea7f9bc1f54129e879b6404243e84219"} Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.933666 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.933646173 podStartE2EDuration="1.933646173s" podCreationTimestamp="2026-01-21 15:47:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:19.914147032 +0000 UTC m=+1381.449260931" watchObservedRunningTime="2026-01-21 15:47:19.933646173 +0000 UTC m=+1381.468760062" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.940022 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.952209 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.968065 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:19 crc kubenswrapper[5021]: E0121 15:47:19.968484 5021 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerName="nova-scheduler-scheduler" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.968497 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerName="nova-scheduler-scheduler" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.968674 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" containerName="nova-scheduler-scheduler" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.969263 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.973155 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 21 15:47:19 crc kubenswrapper[5021]: I0121 15:47:19.991487 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.078410 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.078478 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w22wg\" (UniqueName: \"kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.078543 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.180742 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.180806 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w22wg\" (UniqueName: \"kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.180832 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.184652 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data\") pod \"nova-scheduler-0\" (UID: 
\"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.190591 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.211576 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w22wg\" (UniqueName: \"kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg\") pod \"nova-scheduler-0\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") " pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.298783 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.727003 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.750422 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b042c44-9a4c-4b67-8874-b04dc4f3c0e6" path="/var/lib/kubelet/pods/9b042c44-9a4c-4b67-8874-b04dc4f3c0e6/volumes" Jan 21 15:47:20 crc kubenswrapper[5021]: I0121 15:47:20.908180 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b8a8a56-5cdf-4e01-863f-30d235d24321","Type":"ContainerStarted","Data":"444c89d1414feb2ac437f982f34bb6b292212218d81d452581a4a855ec152914"} Jan 21 15:47:22 crc kubenswrapper[5021]: I0121 15:47:22.932742 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b8a8a56-5cdf-4e01-863f-30d235d24321","Type":"ContainerStarted","Data":"0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f"} Jan 21 15:47:23 crc kubenswrapper[5021]: I0121 15:47:23.539044 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 21 15:47:23 crc kubenswrapper[5021]: I0121 15:47:23.539099 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 21 15:47:24 crc kubenswrapper[5021]: I0121 15:47:24.971497 5021 generic.go:334] "Generic (PLEG): container finished" podID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerID="35fa4cfb2d915c278691d3c17171057404da130c39d64afcaafbbffb53141c5f" exitCode=0 Jan 21 15:47:24 crc kubenswrapper[5021]: I0121 15:47:24.972082 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerDied","Data":"35fa4cfb2d915c278691d3c17171057404da130c39d64afcaafbbffb53141c5f"} Jan 21 15:47:24 crc kubenswrapper[5021]: I0121 15:47:24.992876 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=5.992857743 podStartE2EDuration="5.992857743s" podCreationTimestamp="2026-01-21 15:47:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:24.989722338 +0000 UTC m=+1386.524836227" watchObservedRunningTime="2026-01-21 15:47:24.992857743 +0000 UTC m=+1386.527971632" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.140979 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/ceilometer-0" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.299142 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.736807 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.913698 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data\") pod \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.913809 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkh7x\" (UniqueName: \"kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x\") pod \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.913835 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs\") pod \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.913951 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle\") pod \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\" (UID: \"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c\") " Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.915392 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs" (OuterVolumeSpecName: "logs") pod "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" (UID: "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.925316 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x" (OuterVolumeSpecName: "kube-api-access-bkh7x") pod "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" (UID: "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c"). InnerVolumeSpecName "kube-api-access-bkh7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.941561 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" (UID: "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.943417 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data" (OuterVolumeSpecName: "config-data") pod "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" (UID: "29fd6d1c-f020-4b78-a4a2-93be72b1ef9c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.985511 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"29fd6d1c-f020-4b78-a4a2-93be72b1ef9c","Type":"ContainerDied","Data":"b6ab7534af6dc7c0a976f703586a49a1788f359824bfc93f479f7e7c1754047a"} Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.985532 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:25 crc kubenswrapper[5021]: I0121 15:47:25.985579 5021 scope.go:117] "RemoveContainer" containerID="35fa4cfb2d915c278691d3c17171057404da130c39d64afcaafbbffb53141c5f" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.017828 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.017867 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkh7x\" (UniqueName: \"kubernetes.io/projected/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-kube-api-access-bkh7x\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.017881 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.017891 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.033775 5021 scope.go:117] "RemoveContainer" containerID="dcfe942c3451df06114fe2aea80a5cb338ac982f02ee8c71bb63af1ee6be3421" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.038122 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.064108 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.074465 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:26 crc kubenswrapper[5021]: E0121 15:47:26.074985 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-log" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.075007 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-log" Jan 21 15:47:26 crc kubenswrapper[5021]: E0121 15:47:26.075038 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-api" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.075047 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-api" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.075292 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" containerName="nova-api-log" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.075358 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" 
containerName="nova-api-api" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.076383 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.085334 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.085767 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.222237 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.222343 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.222369 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.222497 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hst8\" (UniqueName: \"kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.323775 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hst8\" (UniqueName: \"kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.323858 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.324014 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.324066 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.324601 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.328754 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.333666 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.342210 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hst8\" (UniqueName: \"kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8\") pod \"nova-api-0\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") " pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.403091 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.749051 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29fd6d1c-f020-4b78-a4a2-93be72b1ef9c" path="/var/lib/kubelet/pods/29fd6d1c-f020-4b78-a4a2-93be72b1ef9c/volumes" Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.861215 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:26 crc kubenswrapper[5021]: I0121 15:47:26.995098 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerStarted","Data":"e4d96d3054ccebe8ada7dd078e7bec0921f9ef91d4f4f2deb000fb622ecb6bfc"} Jan 21 15:47:28 crc kubenswrapper[5021]: I0121 15:47:28.009703 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerStarted","Data":"07aa1f35aef7f864c78780420f6e8b42ce204e3158ea4d0e365cd55f03ac962e"} Jan 21 15:47:28 crc kubenswrapper[5021]: I0121 15:47:28.010075 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerStarted","Data":"e3dd7e462b45e8090d76ae54ff12517facf2b37b72205a25165623c3ca0ab54f"} Jan 21 15:47:28 crc kubenswrapper[5021]: I0121 15:47:28.038678 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.038654833 podStartE2EDuration="2.038654833s" podCreationTimestamp="2026-01-21 15:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:28.030426189 +0000 UTC m=+1389.565540078" watchObservedRunningTime="2026-01-21 15:47:28.038654833 +0000 UTC m=+1389.573768722" Jan 21 15:47:28 crc kubenswrapper[5021]: I0121 15:47:28.539110 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 21 15:47:28 crc kubenswrapper[5021]: I0121 15:47:28.540651 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-metadata-0" Jan 21 15:47:29 crc kubenswrapper[5021]: I0121 15:47:29.554043 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:29 crc kubenswrapper[5021]: I0121 15:47:29.554043 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:30 crc kubenswrapper[5021]: I0121 15:47:30.299630 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 21 15:47:30 crc kubenswrapper[5021]: I0121 15:47:30.329697 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 21 15:47:31 crc kubenswrapper[5021]: I0121 15:47:31.063608 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 21 15:47:33 crc kubenswrapper[5021]: I0121 15:47:33.061532 5021 generic.go:334] "Generic (PLEG): container finished" podID="e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" containerID="8ffd7c56590e0ef552ab69c79953dfb064b804430a4f5bc577f323b3dfcea70a" exitCode=0 Jan 21 15:47:33 crc kubenswrapper[5021]: I0121 15:47:33.061663 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-s49kl" event={"ID":"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2","Type":"ContainerDied","Data":"8ffd7c56590e0ef552ab69c79953dfb064b804430a4f5bc577f323b3dfcea70a"} Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.415024 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.586036 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ff4fq\" (UniqueName: \"kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq\") pod \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.586394 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle\") pod \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.586469 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts\") pod \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.586499 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data\") pod \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\" (UID: \"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2\") " Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.592537 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq" (OuterVolumeSpecName: "kube-api-access-ff4fq") pod "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" (UID: "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2"). InnerVolumeSpecName "kube-api-access-ff4fq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.592944 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts" (OuterVolumeSpecName: "scripts") pod "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" (UID: "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.616138 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data" (OuterVolumeSpecName: "config-data") pod "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" (UID: "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.618928 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" (UID: "e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.688349 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.688390 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.688399 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:34 crc kubenswrapper[5021]: I0121 15:47:34.688408 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ff4fq\" (UniqueName: \"kubernetes.io/projected/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2-kube-api-access-ff4fq\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.081334 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-s49kl" event={"ID":"e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2","Type":"ContainerDied","Data":"ef831429e7a06bedefffca02b858664950295b723bf6b2e1ae9eeb28644b9ac6"} Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.081383 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef831429e7a06bedefffca02b858664950295b723bf6b2e1ae9eeb28644b9ac6" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.081438 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-s49kl" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.176058 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:47:35 crc kubenswrapper[5021]: E0121 15:47:35.176532 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" containerName="nova-cell1-conductor-db-sync" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.176559 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" containerName="nova-cell1-conductor-db-sync" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.176813 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" containerName="nova-cell1-conductor-db-sync" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.177581 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.183732 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.191475 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.297954 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.298482 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.298963 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x77w7\" (UniqueName: \"kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.400875 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.401445 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x77w7\" (UniqueName: \"kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.401738 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.405550 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.407245 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.428324 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x77w7\" (UniqueName: \"kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7\") pod \"nova-cell1-conductor-0\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") " pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.495475 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:35 crc kubenswrapper[5021]: I0121 15:47:35.945457 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:47:36 crc kubenswrapper[5021]: I0121 15:47:36.094035 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a6ed9dcf-812f-4945-ac9d-43839bb27349","Type":"ContainerStarted","Data":"849bfb0f3b8bb3a9d6d261a1faf0901d702687f18874bb4e8c9d0a7d202321d1"} Jan 21 15:47:36 crc kubenswrapper[5021]: I0121 15:47:36.403842 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 21 15:47:36 crc kubenswrapper[5021]: I0121 15:47:36.403940 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 21 15:47:37 crc kubenswrapper[5021]: I0121 15:47:37.486217 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:37 crc kubenswrapper[5021]: I0121 15:47:37.486231 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.114743 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a6ed9dcf-812f-4945-ac9d-43839bb27349","Type":"ContainerStarted","Data":"c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755"} Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.114974 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.142696 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.142672679 podStartE2EDuration="3.142672679s" podCreationTimestamp="2026-01-21 15:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:38.135559145 +0000 UTC m=+1399.670673054" watchObservedRunningTime="2026-01-21 15:47:38.142672679 +0000 UTC m=+1399.677786568" Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.554408 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.558565 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
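
The two "Probe failed" entries above are kubelet's HTTP startup probe for nova-api-0 timing out against http://10.217.0.198:8774/. The error text "context deadline exceeded (Client.Timeout exceeded while awaiting headers)" is what Go's net/http client reports when the server accepts the connection but sends no response headers before the client timeout expires. A minimal stand-alone sketch of that failure mode (not kubelet's prober code; only the URL comes from the entries above, and the one-second timeout is an assumed value):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe mimics an HTTP startup/readiness check: GET the endpoint and
// treat any transport error or non-2xx/3xx status as unhealthy.
func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		// A server that is up but too slow to answer surfaces here as
		// "context deadline exceeded (Client.Timeout exceeded while
		// awaiting headers)" -- the exact string in the log above.
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Endpoint taken from the probe-failure entries; timeout assumed.
	if err := probe("http://10.217.0.198:8774/", 1*time.Second); err != nil {
		fmt.Println("probe failed:", err)
	}
}

Once nova-api begins answering within the timeout, the same probe flips to status="started" and the readiness probes take over, which is exactly the sequence recorded at 15:47:46 below.
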
Jan 21 15:47:38 crc kubenswrapper[5021]: I0121 15:47:38.565515 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 21 15:47:39 crc kubenswrapper[5021]: I0121 15:47:39.132609 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.113408 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.147945 5021 generic.go:334] "Generic (PLEG): container finished" podID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" containerID="9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016" exitCode=137 Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.148958 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.149498 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"06e71668-be48-4b4a-b4ae-c793c0d0aef7","Type":"ContainerDied","Data":"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016"} Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.149526 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"06e71668-be48-4b4a-b4ae-c793c0d0aef7","Type":"ContainerDied","Data":"b64b62b36a657886e8a0e33b87ce86ed70076069cf0359799e420595646303a6"} Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.149546 5021 scope.go:117] "RemoveContainer" containerID="9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.177566 5021 scope.go:117] "RemoveContainer" containerID="9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016" Jan 21 15:47:41 crc kubenswrapper[5021]: E0121 15:47:41.178228 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016\": container with ID starting with 9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016 not found: ID does not exist" containerID="9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.178294 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016"} err="failed to get container status \"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016\": rpc error: code = NotFound desc = could not find container \"9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016\": container with ID starting with 9ac834745086521eee2d82824de04214bad26591b4d2ed1e33fc5aa7bd286016 not found: ID does not exist" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.223557 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") pod \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.223641 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws68h\" (UniqueName: \"kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h\") pod \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\" (UID:
\"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.223713 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data\") pod \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.231803 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h" (OuterVolumeSpecName: "kube-api-access-ws68h") pod "06e71668-be48-4b4a-b4ae-c793c0d0aef7" (UID: "06e71668-be48-4b4a-b4ae-c793c0d0aef7"). InnerVolumeSpecName "kube-api-access-ws68h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:41 crc kubenswrapper[5021]: E0121 15:47:41.250199 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle podName:06e71668-be48-4b4a-b4ae-c793c0d0aef7 nodeName:}" failed. No retries permitted until 2026-01-21 15:47:41.750168979 +0000 UTC m=+1403.285282868 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle") pod "06e71668-be48-4b4a-b4ae-c793c0d0aef7" (UID: "06e71668-be48-4b4a-b4ae-c793c0d0aef7") : error deleting /var/lib/kubelet/pods/06e71668-be48-4b4a-b4ae-c793c0d0aef7/volume-subpaths: remove /var/lib/kubelet/pods/06e71668-be48-4b4a-b4ae-c793c0d0aef7/volume-subpaths: no such file or directory Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.254956 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data" (OuterVolumeSpecName: "config-data") pod "06e71668-be48-4b4a-b4ae-c793c0d0aef7" (UID: "06e71668-be48-4b4a-b4ae-c793c0d0aef7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.327236 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws68h\" (UniqueName: \"kubernetes.io/projected/06e71668-be48-4b4a-b4ae-c793c0d0aef7-kube-api-access-ws68h\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.327577 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.838342 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") pod \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\" (UID: \"06e71668-be48-4b4a-b4ae-c793c0d0aef7\") " Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.845966 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06e71668-be48-4b4a-b4ae-c793c0d0aef7" (UID: "06e71668-be48-4b4a-b4ae-c793c0d0aef7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:41 crc kubenswrapper[5021]: I0121 15:47:41.942078 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e71668-be48-4b4a-b4ae-c793c0d0aef7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.091880 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.105566 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.121068 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:42 crc kubenswrapper[5021]: E0121 15:47:42.121883 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.121930 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.122176 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.124562 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.130132 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.130779 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.130928 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.134607 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.248592 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.248687 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.248724 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjvrt\" (UniqueName: \"kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 
15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.248765 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.248827 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.350560 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.351012 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.351112 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.351163 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjvrt\" (UniqueName: \"kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.351230 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.356418 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.356494 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.357312 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.358449 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.372733 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjvrt\" (UniqueName: \"kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt\") pod \"nova-cell1-novncproxy-0\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.445761 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.773289 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e71668-be48-4b4a-b4ae-c793c0d0aef7" path="/var/lib/kubelet/pods/06e71668-be48-4b4a-b4ae-c793c0d0aef7/volumes" Jan 21 15:47:42 crc kubenswrapper[5021]: I0121 15:47:42.951646 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:47:43 crc kubenswrapper[5021]: I0121 15:47:43.171569 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7e624ae4-b10e-41c8-a09d-9b81cc213cf6","Type":"ContainerStarted","Data":"6b9587a4cdb15e5e26d32f8dc0967f54ab509fc6c672120f56325c2f4b9f598c"} Jan 21 15:47:44 crc kubenswrapper[5021]: I0121 15:47:44.183764 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7e624ae4-b10e-41c8-a09d-9b81cc213cf6","Type":"ContainerStarted","Data":"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557"} Jan 21 15:47:44 crc kubenswrapper[5021]: I0121 15:47:44.225258 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.225232332 podStartE2EDuration="2.225232332s" podCreationTimestamp="2026-01-21 15:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:44.221275384 +0000 UTC m=+1405.756389263" watchObservedRunningTime="2026-01-21 15:47:44.225232332 +0000 UTC m=+1405.760346221" Jan 21 15:47:45 crc kubenswrapper[5021]: I0121 15:47:45.525213 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.407689 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.407789 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.408742 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 
15:47:46.408874 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.411765 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.412646 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.631392 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.634271 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.649945 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766133 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpf76\" (UniqueName: \"kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766355 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766438 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766526 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766590 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.766953 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.870891 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.871040 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.871173 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.871235 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.871298 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.871351 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpf76\" (UniqueName: \"kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.872639 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.873181 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.873464 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.874702 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.876383 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.897779 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpf76\" (UniqueName: \"kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76\") pod \"dnsmasq-dns-5c7b6c5df9-knczx\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:46 crc kubenswrapper[5021]: I0121 15:47:46.963376 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:47 crc kubenswrapper[5021]: I0121 15:47:47.446845 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:47:47 crc kubenswrapper[5021]: I0121 15:47:47.582676 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:47:47 crc kubenswrapper[5021]: W0121 15:47:47.596144 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16b38c07_3cc7_45b6_9145_514af8206bdb.slice/crio-9516123ba13cef8727a8fc636d004ffa81acfc0fbf407464c74bc7dfe7471f25 WatchSource:0}: Error finding container 9516123ba13cef8727a8fc636d004ffa81acfc0fbf407464c74bc7dfe7471f25: Status 404 returned error can't find the container with id 9516123ba13cef8727a8fc636d004ffa81acfc0fbf407464c74bc7dfe7471f25 Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.246189 5021 generic.go:334] "Generic (PLEG): container finished" podID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerID="e76dec9ec11e29552fbf6fce215e84ba99770f62744ce04725833b8e21d00d65" exitCode=0 Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.246242 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" event={"ID":"16b38c07-3cc7-45b6-9145-514af8206bdb","Type":"ContainerDied","Data":"e76dec9ec11e29552fbf6fce215e84ba99770f62744ce04725833b8e21d00d65"} Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.246886 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" event={"ID":"16b38c07-3cc7-45b6-9145-514af8206bdb","Type":"ContainerStarted","Data":"9516123ba13cef8727a8fc636d004ffa81acfc0fbf407464c74bc7dfe7471f25"} Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.913295 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.914105 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="sg-core" containerID="cri-o://a4d66d922163c42defd3f72405093645235a7a8b3072cd9967db3ad881dcf7ef" gracePeriod=30 Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.914215 5021 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-notification-agent" containerID="cri-o://b0180964b73b70183e501237d00aaaf411fec880e4b2d7aab7f930ac6d298dc2" gracePeriod=30 Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.914285 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-central-agent" containerID="cri-o://2fe27286fc696de2912f3ca1c8f3f80477579502971375d7979b1752fffe3ab5" gracePeriod=30 Jan 21 15:47:48 crc kubenswrapper[5021]: I0121 15:47:48.914410 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="proxy-httpd" containerID="cri-o://df02703a4c529283cc77edfe3ac44c3e5122d4f268c215d2e025ed62f64682aa" gracePeriod=30 Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.276424 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" event={"ID":"16b38c07-3cc7-45b6-9145-514af8206bdb","Type":"ContainerStarted","Data":"2fc2eb7f65b6d060d0dd9c5922dc5172b7314bb18a053569c709b7586f92be06"} Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.277605 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.292713 5021 generic.go:334] "Generic (PLEG): container finished" podID="f83da032-e822-4650-8896-8fc86b81a081" containerID="df02703a4c529283cc77edfe3ac44c3e5122d4f268c215d2e025ed62f64682aa" exitCode=0 Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.292759 5021 generic.go:334] "Generic (PLEG): container finished" podID="f83da032-e822-4650-8896-8fc86b81a081" containerID="a4d66d922163c42defd3f72405093645235a7a8b3072cd9967db3ad881dcf7ef" exitCode=2 Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.292787 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerDied","Data":"df02703a4c529283cc77edfe3ac44c3e5122d4f268c215d2e025ed62f64682aa"} Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.292818 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerDied","Data":"a4d66d922163c42defd3f72405093645235a7a8b3072cd9967db3ad881dcf7ef"} Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.313628 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" podStartSLOduration=3.313609737 podStartE2EDuration="3.313609737s" podCreationTimestamp="2026-01-21 15:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:49.310285447 +0000 UTC m=+1410.845399336" watchObservedRunningTime="2026-01-21 15:47:49.313609737 +0000 UTC m=+1410.848723626" Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.353334 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
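
The "Killing container with a grace period" entries record the graceful-shutdown path: on the API DELETE, kubelet asks the runtime (CRI-O here) to stop each container, the runtime delivers SIGTERM, and it escalates to SIGKILL only when the grace period (gracePeriod=30 above) expires. The exit codes that follow use the usual 128+signal convention: exitCode=143 just below for nova-api-log is 128+15 (exited on SIGTERM), while the exitCode=137 recorded earlier for the novncproxy container is 128+9 (SIGKILL after the grace period ran out). A minimal sketch of the escalation pattern (illustrative only, not CRI-O's implementation; the sleep child process is a stand-in for a container's main process):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60") // stand-in for the container's main process
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	// Polite request; by convention a process ending on SIGTERM is
	// reported as exit status 128+15 = 143.
	cmd.Process.Signal(syscall.SIGTERM)

	select {
	case <-done:
		fmt.Println("exited within the grace period (SIGTERM)")
	case <-time.After(30 * time.Second): // gracePeriod=30, as in the log
		// Escalate: SIGKILL is reported as 128+9 = 137.
		cmd.Process.Kill()
		<-done
		fmt.Println("killed after the grace period expired")
	}
}
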
Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.353957 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-log" containerID="cri-o://e3dd7e462b45e8090d76ae54ff12517facf2b37b72205a25165623c3ca0ab54f" gracePeriod=30 Jan 21 15:47:49 crc kubenswrapper[5021]: I0121 15:47:49.354103 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-api" containerID="cri-o://07aa1f35aef7f864c78780420f6e8b42ce204e3158ea4d0e365cd55f03ac962e" gracePeriod=30 Jan 21 15:47:50 crc kubenswrapper[5021]: I0121 15:47:50.310109 5021 generic.go:334] "Generic (PLEG): container finished" podID="f83da032-e822-4650-8896-8fc86b81a081" containerID="2fe27286fc696de2912f3ca1c8f3f80477579502971375d7979b1752fffe3ab5" exitCode=0 Jan 21 15:47:50 crc kubenswrapper[5021]: I0121 15:47:50.310329 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerDied","Data":"2fe27286fc696de2912f3ca1c8f3f80477579502971375d7979b1752fffe3ab5"} Jan 21 15:47:50 crc kubenswrapper[5021]: I0121 15:47:50.313646 5021 generic.go:334] "Generic (PLEG): container finished" podID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerID="e3dd7e462b45e8090d76ae54ff12517facf2b37b72205a25165623c3ca0ab54f" exitCode=143 Jan 21 15:47:50 crc kubenswrapper[5021]: I0121 15:47:50.313880 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerDied","Data":"e3dd7e462b45e8090d76ae54ff12517facf2b37b72205a25165623c3ca0ab54f"} Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.328341 5021 generic.go:334] "Generic (PLEG): container finished" podID="f83da032-e822-4650-8896-8fc86b81a081" containerID="b0180964b73b70183e501237d00aaaf411fec880e4b2d7aab7f930ac6d298dc2" exitCode=0 Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.328442 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerDied","Data":"b0180964b73b70183e501237d00aaaf411fec880e4b2d7aab7f930ac6d298dc2"} Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.496852 5021 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.580144 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.580622 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582250 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582328 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsv96\" (UniqueName: \"kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582370 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582409 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582474 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.582496 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd\") pod \"f83da032-e822-4650-8896-8fc86b81a081\" (UID: \"f83da032-e822-4650-8896-8fc86b81a081\") " Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.583686 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.583886 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.589505 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96" (OuterVolumeSpecName: "kube-api-access-bsv96") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "kube-api-access-bsv96". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.603591 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts" (OuterVolumeSpecName: "scripts") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.685045 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.686247 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.686270 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.686278 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f83da032-e822-4650-8896-8fc86b81a081-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.686287 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsv96\" (UniqueName: \"kubernetes.io/projected/f83da032-e822-4650-8896-8fc86b81a081-kube-api-access-bsv96\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.686297 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.744088 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.753060 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data" (OuterVolumeSpecName: "config-data") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.788430 5021 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.788483 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.790117 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f83da032-e822-4650-8896-8fc86b81a081" (UID: "f83da032-e822-4650-8896-8fc86b81a081"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:47:51 crc kubenswrapper[5021]: I0121 15:47:51.890933 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83da032-e822-4650-8896-8fc86b81a081-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.341858 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f83da032-e822-4650-8896-8fc86b81a081","Type":"ContainerDied","Data":"7e4217a362bd65fe55a98cf6c1da2cec8ee9d6d7a200be101e1426d765f9ece9"} Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.341947 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.341950 5021 scope.go:117] "RemoveContainer" containerID="df02703a4c529283cc77edfe3ac44c3e5122d4f268c215d2e025ed62f64682aa"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.372120 5021 scope.go:117] "RemoveContainer" containerID="a4d66d922163c42defd3f72405093645235a7a8b3072cd9967db3ad881dcf7ef"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.387344 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.398852 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.399339 5021 scope.go:117] "RemoveContainer" containerID="b0180964b73b70183e501237d00aaaf411fec880e4b2d7aab7f930ac6d298dc2"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.416294 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:47:52 crc kubenswrapper[5021]: E0121 15:47:52.416847 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-central-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.416872 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-central-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: E0121 15:47:52.416898 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-notification-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.416936 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-notification-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: E0121 15:47:52.416964 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="proxy-httpd"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.416972 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="proxy-httpd"
Jan 21 15:47:52 crc kubenswrapper[5021]: E0121 15:47:52.416990 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="sg-core"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.416997 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="sg-core"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.417199 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="proxy-httpd"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.417223 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-central-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.417234 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="sg-core"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.417258 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83da032-e822-4650-8896-8fc86b81a081" containerName="ceilometer-notification-agent"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.420341 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.425670 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.425726 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.425962 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.427722 5021 scope.go:117] "RemoveContainer" containerID="2fe27286fc696de2912f3ca1c8f3f80477579502971375d7979b1752fffe3ab5"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.431466 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.447145 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.471220 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506228 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506287 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506329 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506660 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506766 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnfdf\" (UniqueName: \"kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506807 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506834 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.506896 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.609461 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.609887 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.609959 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610051 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610095 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnfdf\" (UniqueName: \"kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610125 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610147 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610185 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
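[Editor's note -- annotation, not part of the captured log.] The entries above are klog-formatted: a severity letter (I/E/W), the date as MMDD, a microsecond timestamp, the kubelet PID (5021), the emitting source file and line, then a structured message. A minimal sketch in Go of a parser for these lines, under the assumption that every entry follows the fixed layout seen in this log; the file name, helper, and regex are illustrative, not kubelet code:

    // klog_scan.go -- hypothetical helper for reading this log; not part of kubelet.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Matches e.g.: I0121 15:47:52.506228 5021 reconciler_common.go:245] "..."
    var klogHeader = regexp.MustCompile(`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+)\s+(\S+:\d+)\] (.*)`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 1024*1024), 1024*1024) // entries here can exceed the default 64 KiB buffer
        for sc.Scan() {
            if m := klogHeader.FindStringSubmatch(sc.Text()); m != nil {
                // m[1]=severity, m[2]=MMDD, m[3]=time, m[4]=PID, m[5]=file:line, m[6]=message
                fmt.Printf("%s %s %-28s %s\n", m[1], m[3], m[5], m[6])
            }
        }
    }

[End of editor's note.]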
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610550 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.610632 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.616404 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.616634 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.617373 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.617445 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.618496 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.627725 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnfdf\" (UniqueName: \"kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf\") pod \"ceilometer-0\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " pod="openstack/ceilometer-0"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.755424 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f83da032-e822-4650-8896-8fc86b81a081" path="/var/lib/kubelet/pods/f83da032-e822-4650-8896-8fc86b81a081/volumes"
Jan 21 15:47:52 crc kubenswrapper[5021]: I0121 15:47:52.758279 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.362193 5021 generic.go:334] "Generic (PLEG): container finished" podID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerID="07aa1f35aef7f864c78780420f6e8b42ce204e3158ea4d0e365cd55f03ac962e" exitCode=0
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.362754 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerDied","Data":"07aa1f35aef7f864c78780420f6e8b42ce204e3158ea4d0e365cd55f03ac962e"}
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.393310 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.396965 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.620772 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.644015 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-mqzs9"]
Jan 21 15:47:53 crc kubenswrapper[5021]: E0121 15:47:53.644596 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-log"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.644617 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-log"
Jan 21 15:47:53 crc kubenswrapper[5021]: E0121 15:47:53.644632 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-api"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.644639 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-api"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.644820 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-log"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.644861 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" containerName="nova-api-api"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.645574 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.648481 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.653808 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.669016 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mqzs9"]
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.758401 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data\") pod \"85e5e930-dd20-4169-b38c-d8765b1aa977\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") "
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.758497 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle\") pod \"85e5e930-dd20-4169-b38c-d8765b1aa977\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") "
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.758567 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hst8\" (UniqueName: \"kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8\") pod \"85e5e930-dd20-4169-b38c-d8765b1aa977\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") "
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.758646 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs\") pod \"85e5e930-dd20-4169-b38c-d8765b1aa977\" (UID: \"85e5e930-dd20-4169-b38c-d8765b1aa977\") "
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.758968 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.759097 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg82q\" (UniqueName: \"kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.759128 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.759166 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.761653 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs" (OuterVolumeSpecName: "logs") pod "85e5e930-dd20-4169-b38c-d8765b1aa977" (UID: "85e5e930-dd20-4169-b38c-d8765b1aa977"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.765720 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8" (OuterVolumeSpecName: "kube-api-access-6hst8") pod "85e5e930-dd20-4169-b38c-d8765b1aa977" (UID: "85e5e930-dd20-4169-b38c-d8765b1aa977"). InnerVolumeSpecName "kube-api-access-6hst8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.791756 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "85e5e930-dd20-4169-b38c-d8765b1aa977" (UID: "85e5e930-dd20-4169-b38c-d8765b1aa977"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.806227 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data" (OuterVolumeSpecName: "config-data") pod "85e5e930-dd20-4169-b38c-d8765b1aa977" (UID: "85e5e930-dd20-4169-b38c-d8765b1aa977"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.860933 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861037 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861229 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg82q\" (UniqueName: \"kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861275 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861398 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861419 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85e5e930-dd20-4169-b38c-d8765b1aa977-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861433 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hst8\" (UniqueName: \"kubernetes.io/projected/85e5e930-dd20-4169-b38c-d8765b1aa977-kube-api-access-6hst8\") on node \"crc\" DevicePath \"\""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.861443 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/85e5e930-dd20-4169-b38c-d8765b1aa977-logs\") on node \"crc\" DevicePath \"\""
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.864539 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.865801 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.868309 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.884435 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg82q\" (UniqueName: \"kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q\") pod \"nova-cell1-cell-mapping-mqzs9\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:53 crc kubenswrapper[5021]: I0121 15:47:53.981957 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mqzs9"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.390320 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.390373 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"85e5e930-dd20-4169-b38c-d8765b1aa977","Type":"ContainerDied","Data":"e4d96d3054ccebe8ada7dd078e7bec0921f9ef91d4f4f2deb000fb622ecb6bfc"}
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.390729 5021 scope.go:117] "RemoveContainer" containerID="07aa1f35aef7f864c78780420f6e8b42ce204e3158ea4d0e365cd55f03ac962e"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.402188 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerStarted","Data":"5037559fb60350e2158a6a5f376df3b44cba445783affafde419d451bd46ca8e"}
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.402232 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerStarted","Data":"b4c373a3cf0a113ad7ec48c766242bd60778dae0ef5dccf264168c260bf583bd"}
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.424037 5021 scope.go:117] "RemoveContainer" containerID="e3dd7e462b45e8090d76ae54ff12517facf2b37b72205a25165623c3ca0ab54f"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.432002 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.448581 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.457302 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.459793 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.466466 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.466734 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.467142 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.487780 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:47:54 crc kubenswrapper[5021]: W0121 15:47:54.518733 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd0690b4_983b_42e0_91d3_fff22e7938c2.slice/crio-9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0 WatchSource:0}: Error finding container 9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0: Status 404 returned error can't find the container with id 9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.527391 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mqzs9"]
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581105 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581207 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581541 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581632 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581777 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.581924 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkxr6\" (UniqueName: \"kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684367 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkxr6\" (UniqueName: \"kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684483 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684588 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684709 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684778 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.684880 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.687418 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.690499 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.691027 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.691700 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0"
Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.693104 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0" Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.711119 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkxr6\" (UniqueName: \"kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6\") pod \"nova-api-0\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") " pod="openstack/nova-api-0" Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.750388 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85e5e930-dd20-4169-b38c-d8765b1aa977" path="/var/lib/kubelet/pods/85e5e930-dd20-4169-b38c-d8765b1aa977/volumes" Jan 21 15:47:54 crc kubenswrapper[5021]: I0121 15:47:54.795107 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.335637 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.418232 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mqzs9" event={"ID":"bd0690b4-983b-42e0-91d3-fff22e7938c2","Type":"ContainerStarted","Data":"57acaf70f1630d479893a569ee9512f44f17d7e93296529ac9b7d31b0b49df2b"} Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.418288 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mqzs9" event={"ID":"bd0690b4-983b-42e0-91d3-fff22e7938c2","Type":"ContainerStarted","Data":"9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0"} Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.421129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerStarted","Data":"8d425e98d39b82e8ff834275afbbd09d41f458b740a780db59954fa6251d14e5"} Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.438709 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerStarted","Data":"0759b80577371cc49cefb1bd28c9e466adeeab42325a5a65e3b96c91eaaf9d0f"} Jan 21 15:47:55 crc kubenswrapper[5021]: I0121 15:47:55.438961 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-mqzs9" podStartSLOduration=2.438948297 podStartE2EDuration="2.438948297s" podCreationTimestamp="2026-01-21 15:47:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:55.435674787 +0000 UTC m=+1416.970788676" watchObservedRunningTime="2026-01-21 15:47:55.438948297 +0000 UTC m=+1416.974062196" Jan 21 15:47:56 crc kubenswrapper[5021]: I0121 15:47:56.464661 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerStarted","Data":"9ed7178c97d50db4f9407454ecec73820dcb342e53d238b546403c0c3a753f93"} Jan 21 15:47:56 crc kubenswrapper[5021]: I0121 15:47:56.964623 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 
15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.038317 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.038599 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="dnsmasq-dns" containerID="cri-o://5ba36e3a2aa67b9c5a478ad80a2d8b5733c4b9d33eb64319eff2f705b846786a" gracePeriod=10 Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.479499 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerStarted","Data":"9e7ddce4cf8d1e90155d69318107e51e50dde636777370df6d9675e6e804665e"} Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.482817 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerStarted","Data":"d469f6da18f82ed513ca23f58483baedbaa7e647a2993d4a661797b2c17d7f05"} Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.487284 5021 generic.go:334] "Generic (PLEG): container finished" podID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerID="5ba36e3a2aa67b9c5a478ad80a2d8b5733c4b9d33eb64319eff2f705b846786a" exitCode=0 Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.487370 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerDied","Data":"5ba36e3a2aa67b9c5a478ad80a2d8b5733c4b9d33eb64319eff2f705b846786a"} Jan 21 15:47:57 crc kubenswrapper[5021]: I0121 15:47:57.507149 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.5071239690000002 podStartE2EDuration="3.507123969s" podCreationTimestamp="2026-01-21 15:47:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:47:57.504552159 +0000 UTC m=+1419.039666068" watchObservedRunningTime="2026-01-21 15:47:57.507123969 +0000 UTC m=+1419.042238098" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.388138 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.470780 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.470936 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.471904 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.472263 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjrwc\" (UniqueName: \"kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.472308 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.472374 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb\") pod \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\" (UID: \"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16\") " Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.497175 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc" (OuterVolumeSpecName: "kube-api-access-zjrwc") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "kube-api-access-zjrwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.520137 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.520457 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-4ggfx" event={"ID":"d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16","Type":"ContainerDied","Data":"f4918ddd3eeba0042bbd928ec5446ddd04d3de55f5e7cfa81ff90b4964898209"} Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.520616 5021 scope.go:117] "RemoveContainer" containerID="5ba36e3a2aa67b9c5a478ad80a2d8b5733c4b9d33eb64319eff2f705b846786a" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.540155 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.581427 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.581464 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjrwc\" (UniqueName: \"kubernetes.io/projected/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-kube-api-access-zjrwc\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.581531 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.589499 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config" (OuterVolumeSpecName: "config") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.589694 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.611672 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" (UID: "d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.675241 5021 scope.go:117] "RemoveContainer" containerID="d2769a56c1fd9346b53309dbd592b331b72468d30b542d83da95caa81ea9fb5b" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.684856 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.684898 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.684932 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.684946 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.847845 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:47:58 crc kubenswrapper[5021]: I0121 15:47:58.861458 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-4ggfx"] Jan 21 15:48:00 crc kubenswrapper[5021]: I0121 15:48:00.561488 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerStarted","Data":"9e0d0434088b24ecb2f7a6a737f0cc34be54ffb9cbf7f5f7696a923ceacf6bc2"} Jan 21 15:48:00 crc kubenswrapper[5021]: I0121 15:48:00.562150 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 21 15:48:00 crc kubenswrapper[5021]: I0121 15:48:00.589983 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.785929463 podStartE2EDuration="8.589956758s" podCreationTimestamp="2026-01-21 15:47:52 +0000 UTC" firstStartedPulling="2026-01-21 15:47:53.399487566 +0000 UTC m=+1414.934601455" lastFinishedPulling="2026-01-21 15:47:59.203514861 +0000 UTC m=+1420.738628750" observedRunningTime="2026-01-21 15:48:00.583736658 +0000 UTC m=+1422.118850547" watchObservedRunningTime="2026-01-21 15:48:00.589956758 +0000 UTC m=+1422.125070647" Jan 21 15:48:00 crc kubenswrapper[5021]: I0121 15:48:00.751817 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" path="/var/lib/kubelet/pods/d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16/volumes" Jan 21 15:48:01 crc kubenswrapper[5021]: I0121 15:48:01.572168 5021 generic.go:334] "Generic (PLEG): container finished" podID="bd0690b4-983b-42e0-91d3-fff22e7938c2" containerID="57acaf70f1630d479893a569ee9512f44f17d7e93296529ac9b7d31b0b49df2b" exitCode=0 Jan 21 15:48:01 crc kubenswrapper[5021]: I0121 15:48:01.572251 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mqzs9" event={"ID":"bd0690b4-983b-42e0-91d3-fff22e7938c2","Type":"ContainerDied","Data":"57acaf70f1630d479893a569ee9512f44f17d7e93296529ac9b7d31b0b49df2b"} Jan 21 15:48:02 crc kubenswrapper[5021]: I0121 
15:48:02.962851 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mqzs9" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.080900 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts\") pod \"bd0690b4-983b-42e0-91d3-fff22e7938c2\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.081015 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg82q\" (UniqueName: \"kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q\") pod \"bd0690b4-983b-42e0-91d3-fff22e7938c2\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.081089 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle\") pod \"bd0690b4-983b-42e0-91d3-fff22e7938c2\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.081129 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data\") pod \"bd0690b4-983b-42e0-91d3-fff22e7938c2\" (UID: \"bd0690b4-983b-42e0-91d3-fff22e7938c2\") " Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.087692 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q" (OuterVolumeSpecName: "kube-api-access-xg82q") pod "bd0690b4-983b-42e0-91d3-fff22e7938c2" (UID: "bd0690b4-983b-42e0-91d3-fff22e7938c2"). InnerVolumeSpecName "kube-api-access-xg82q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.088149 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts" (OuterVolumeSpecName: "scripts") pod "bd0690b4-983b-42e0-91d3-fff22e7938c2" (UID: "bd0690b4-983b-42e0-91d3-fff22e7938c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.129183 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd0690b4-983b-42e0-91d3-fff22e7938c2" (UID: "bd0690b4-983b-42e0-91d3-fff22e7938c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.152734 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data" (OuterVolumeSpecName: "config-data") pod "bd0690b4-983b-42e0-91d3-fff22e7938c2" (UID: "bd0690b4-983b-42e0-91d3-fff22e7938c2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.183595 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.183654 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.183666 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd0690b4-983b-42e0-91d3-fff22e7938c2-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.183676 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg82q\" (UniqueName: \"kubernetes.io/projected/bd0690b4-983b-42e0-91d3-fff22e7938c2-kube-api-access-xg82q\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.594002 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mqzs9" event={"ID":"bd0690b4-983b-42e0-91d3-fff22e7938c2","Type":"ContainerDied","Data":"9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0"} Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.594331 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9519993089aa08401f22c892f9d4c5bdc494b64be381812e2defd99fc146f8f0" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.594079 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mqzs9" Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.783061 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.783643 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-log" containerID="cri-o://9ed7178c97d50db4f9407454ecec73820dcb342e53d238b546403c0c3a753f93" gracePeriod=30 Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.783709 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-api" containerID="cri-o://d469f6da18f82ed513ca23f58483baedbaa7e647a2993d4a661797b2c17d7f05" gracePeriod=30 Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.803712 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.803977 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerName="nova-scheduler-scheduler" containerID="cri-o://0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f" gracePeriod=30 Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.835191 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.835484 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="23436463-da03-498e-90e9-a224f2fa1600" 
containerName="nova-metadata-log" containerID="cri-o://d977c5fa121c11f08d5bce3d668c5a9e751eb49c2098d3b8f863976c805e8275" gracePeriod=30 Jan 21 15:48:03 crc kubenswrapper[5021]: I0121 15:48:03.835627 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-metadata" containerID="cri-o://2e86eac1f2449a41f78dad013fa386ac5187e07612bf88e22dc9295311e163b5" gracePeriod=30 Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.605662 5021 generic.go:334] "Generic (PLEG): container finished" podID="886b2999-2869-4743-a526-f80b798011ee" containerID="d469f6da18f82ed513ca23f58483baedbaa7e647a2993d4a661797b2c17d7f05" exitCode=0 Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.606003 5021 generic.go:334] "Generic (PLEG): container finished" podID="886b2999-2869-4743-a526-f80b798011ee" containerID="9ed7178c97d50db4f9407454ecec73820dcb342e53d238b546403c0c3a753f93" exitCode=143 Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.605738 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerDied","Data":"d469f6da18f82ed513ca23f58483baedbaa7e647a2993d4a661797b2c17d7f05"} Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.606107 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerDied","Data":"9ed7178c97d50db4f9407454ecec73820dcb342e53d238b546403c0c3a753f93"} Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.608939 5021 generic.go:334] "Generic (PLEG): container finished" podID="23436463-da03-498e-90e9-a224f2fa1600" containerID="d977c5fa121c11f08d5bce3d668c5a9e751eb49c2098d3b8f863976c805e8275" exitCode=143 Jan 21 15:48:04 crc kubenswrapper[5021]: I0121 15:48:04.608968 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerDied","Data":"d977c5fa121c11f08d5bce3d668c5a9e751eb49c2098d3b8f863976c805e8275"} Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.019255 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.122847 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.123206 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.123404 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.123553 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkxr6\" (UniqueName: \"kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.123637 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.123755 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs\") pod \"886b2999-2869-4743-a526-f80b798011ee\" (UID: \"886b2999-2869-4743-a526-f80b798011ee\") "
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.124070 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs" (OuterVolumeSpecName: "logs") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.124369 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/886b2999-2869-4743-a526-f80b798011ee-logs\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.129004 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6" (OuterVolumeSpecName: "kube-api-access-rkxr6") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "kube-api-access-rkxr6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.155089 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.165589 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data" (OuterVolumeSpecName: "config-data") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.190080 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.204434 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "886b2999-2869-4743-a526-f80b798011ee" (UID: "886b2999-2869-4743-a526-f80b798011ee"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.226820 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkxr6\" (UniqueName: \"kubernetes.io/projected/886b2999-2869-4743-a526-f80b798011ee-kube-api-access-rkxr6\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.226870 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.226882 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.226893 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.226921 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/886b2999-2869-4743-a526-f80b798011ee-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.302456 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.305242 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.306641 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.306710 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerName="nova-scheduler-scheduler"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.620230 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"886b2999-2869-4743-a526-f80b798011ee","Type":"ContainerDied","Data":"0759b80577371cc49cefb1bd28c9e466adeeab42325a5a65e3b96c91eaaf9d0f"}
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.620287 5021 scope.go:117] "RemoveContainer" containerID="d469f6da18f82ed513ca23f58483baedbaa7e647a2993d4a661797b2c17d7f05"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.620318 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.652926 5021 scope.go:117] "RemoveContainer" containerID="9ed7178c97d50db4f9407454ecec73820dcb342e53d238b546403c0c3a753f93"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.659848 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.674510 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.681619 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.682048 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-api"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682103 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-api"
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.682244 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="init"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682253 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="init"
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.682273 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="dnsmasq-dns"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682369 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="dnsmasq-dns"
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.682381 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd0690b4-983b-42e0-91d3-fff22e7938c2" containerName="nova-manage"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682387 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd0690b4-983b-42e0-91d3-fff22e7938c2" containerName="nova-manage"
Jan 21 15:48:05 crc kubenswrapper[5021]: E0121 15:48:05.682397 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-log"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682402 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-log"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682565 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-api"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682580 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd0690b4-983b-42e0-91d3-fff22e7938c2" containerName="nova-manage"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682598 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7544c43-bf6b-4eda-aaa5-04d4c6fe1d16" containerName="dnsmasq-dns"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.682609 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="886b2999-2869-4743-a526-f80b798011ee" containerName="nova-api-log"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.683586 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.685796 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.691208 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.691085 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.704735 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.735245 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb6b5\" (UniqueName: \"kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.735394 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.735707 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.735787 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.736100 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.736171 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.837577 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.837645 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.838607 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.839170 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.839254 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb6b5\" (UniqueName: \"kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.839298 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.839584 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.841633 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.842279 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.843311 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.843611 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:05 crc kubenswrapper[5021]: I0121 15:48:05.858536 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb6b5\" (UniqueName: \"kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5\") pod \"nova-api-0\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " pod="openstack/nova-api-0"
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.007832 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.490041 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 21 15:48:06 crc kubenswrapper[5021]: W0121 15:48:06.498450 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad959625_d43f_48c3_b42f_d35e63e9af44.slice/crio-8d1dcbb53dfb4f22cdb7b139dc6aed995e911cda8c1e7170f2e3e0b46df1dea0 WatchSource:0}: Error finding container 8d1dcbb53dfb4f22cdb7b139dc6aed995e911cda8c1e7170f2e3e0b46df1dea0: Status 404 returned error can't find the container with id 8d1dcbb53dfb4f22cdb7b139dc6aed995e911cda8c1e7170f2e3e0b46df1dea0
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.639790 5021 generic.go:334] "Generic (PLEG): container finished" podID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerID="0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f" exitCode=0
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.639840 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b8a8a56-5cdf-4e01-863f-30d235d24321","Type":"ContainerDied","Data":"0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f"}
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.642230 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerStarted","Data":"8d1dcbb53dfb4f22cdb7b139dc6aed995e911cda8c1e7170f2e3e0b46df1dea0"}
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.756217 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="886b2999-2869-4743-a526-f80b798011ee" path="/var/lib/kubelet/pods/886b2999-2869-4743-a526-f80b798011ee/volumes"
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.853500 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.966631 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle\") pod \"9b8a8a56-5cdf-4e01-863f-30d235d24321\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") "
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.966747 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w22wg\" (UniqueName: \"kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg\") pod \"9b8a8a56-5cdf-4e01-863f-30d235d24321\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") "
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.966781 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data\") pod \"9b8a8a56-5cdf-4e01-863f-30d235d24321\" (UID: \"9b8a8a56-5cdf-4e01-863f-30d235d24321\") "
Jan 21 15:48:06 crc kubenswrapper[5021]: I0121 15:48:06.994536 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg" (OuterVolumeSpecName: "kube-api-access-w22wg") pod "9b8a8a56-5cdf-4e01-863f-30d235d24321" (UID: "9b8a8a56-5cdf-4e01-863f-30d235d24321"). InnerVolumeSpecName "kube-api-access-w22wg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.005567 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b8a8a56-5cdf-4e01-863f-30d235d24321" (UID: "9b8a8a56-5cdf-4e01-863f-30d235d24321"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.010874 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data" (OuterVolumeSpecName: "config-data") pod "9b8a8a56-5cdf-4e01-863f-30d235d24321" (UID: "9b8a8a56-5cdf-4e01-863f-30d235d24321"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.069387 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.069696 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w22wg\" (UniqueName: \"kubernetes.io/projected/9b8a8a56-5cdf-4e01-863f-30d235d24321-kube-api-access-w22wg\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.069714 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b8a8a56-5cdf-4e01-863f-30d235d24321-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.660655 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b8a8a56-5cdf-4e01-863f-30d235d24321","Type":"ContainerDied","Data":"444c89d1414feb2ac437f982f34bb6b292212218d81d452581a4a855ec152914"}
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.661133 5021 scope.go:117] "RemoveContainer" containerID="0ce48b95bb2a19c2d7adfa1ac62aeaa3b83038faeb597d6cac4b65fd7f65db6f"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.661316 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.667195 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerStarted","Data":"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55"}
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.667253 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerStarted","Data":"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d"}
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.669281 5021 generic.go:334] "Generic (PLEG): container finished" podID="23436463-da03-498e-90e9-a224f2fa1600" containerID="2e86eac1f2449a41f78dad013fa386ac5187e07612bf88e22dc9295311e163b5" exitCode=0
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.669348 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerDied","Data":"2e86eac1f2449a41f78dad013fa386ac5187e07612bf88e22dc9295311e163b5"}
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.703419 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.70339243 podStartE2EDuration="2.70339243s" podCreationTimestamp="2026-01-21 15:48:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:48:07.699366284 +0000 UTC m=+1429.234480173" watchObservedRunningTime="2026-01-21 15:48:07.70339243 +0000 UTC m=+1429.238506329"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.778770 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.785605 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.797344 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.807266 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 21 15:48:07 crc kubenswrapper[5021]: E0121 15:48:07.808423 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerName="nova-scheduler-scheduler"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808448 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerName="nova-scheduler-scheduler"
Jan 21 15:48:07 crc kubenswrapper[5021]: E0121 15:48:07.808468 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-metadata"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808475 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-metadata"
Jan 21 15:48:07 crc kubenswrapper[5021]: E0121 15:48:07.808493 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-log"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808501 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-log"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808687 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-metadata"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808700 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="23436463-da03-498e-90e9-a224f2fa1600" containerName="nova-metadata-log"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.808708 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" containerName="nova-scheduler-scheduler"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.809393 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.812610 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.851812 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.885145 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data\") pod \"23436463-da03-498e-90e9-a224f2fa1600\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") "
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.885227 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle\") pod \"23436463-da03-498e-90e9-a224f2fa1600\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") "
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.885314 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs\") pod \"23436463-da03-498e-90e9-a224f2fa1600\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") "
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.885346 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs\") pod \"23436463-da03-498e-90e9-a224f2fa1600\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") "
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.885598 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dc6z\" (UniqueName: \"kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z\") pod \"23436463-da03-498e-90e9-a224f2fa1600\" (UID: \"23436463-da03-498e-90e9-a224f2fa1600\") "
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.886000 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt9fd\" (UniqueName: \"kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.886074 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.886100 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.907960 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs" (OuterVolumeSpecName: "logs") pod "23436463-da03-498e-90e9-a224f2fa1600" (UID: "23436463-da03-498e-90e9-a224f2fa1600"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.920173 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z" (OuterVolumeSpecName: "kube-api-access-6dc6z") pod "23436463-da03-498e-90e9-a224f2fa1600" (UID: "23436463-da03-498e-90e9-a224f2fa1600"). InnerVolumeSpecName "kube-api-access-6dc6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.960899 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data" (OuterVolumeSpecName: "config-data") pod "23436463-da03-498e-90e9-a224f2fa1600" (UID: "23436463-da03-498e-90e9-a224f2fa1600"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988106 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt9fd\" (UniqueName: \"kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988205 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988233 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988344 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dc6z\" (UniqueName: \"kubernetes.io/projected/23436463-da03-498e-90e9-a224f2fa1600-kube-api-access-6dc6z\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988354 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-config-data\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:07 crc kubenswrapper[5021]: I0121 15:48:07.988363 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23436463-da03-498e-90e9-a224f2fa1600-logs\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.014026 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.019603 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0"
\"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.054721 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt9fd\" (UniqueName: \"kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd\") pod \"nova-scheduler-0\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " pod="openstack/nova-scheduler-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.129138 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23436463-da03-498e-90e9-a224f2fa1600" (UID: "23436463-da03-498e-90e9-a224f2fa1600"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.130811 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "23436463-da03-498e-90e9-a224f2fa1600" (UID: "23436463-da03-498e-90e9-a224f2fa1600"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.135830 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.195409 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.195458 5021 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/23436463-da03-498e-90e9-a224f2fa1600-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.603054 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:48:08 crc kubenswrapper[5021]: W0121 15:48:08.612581 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0787e96e_5c19_467d_9ad4_ec70202c8cdf.slice/crio-f48f3a48157f783b9f28a75f12a4e2f6551c62f71bff00ff6f521f0d158846a5 WatchSource:0}: Error finding container f48f3a48157f783b9f28a75f12a4e2f6551c62f71bff00ff6f521f0d158846a5: Status 404 returned error can't find the container with id f48f3a48157f783b9f28a75f12a4e2f6551c62f71bff00ff6f521f0d158846a5 Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.682035 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"23436463-da03-498e-90e9-a224f2fa1600","Type":"ContainerDied","Data":"64e8f5339afdedfee7db1007103ed997ea7f9bc1f54129e879b6404243e84219"} Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.682092 5021 scope.go:117] "RemoveContainer" containerID="2e86eac1f2449a41f78dad013fa386ac5187e07612bf88e22dc9295311e163b5" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.682057 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.685082 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0787e96e-5c19-467d-9ad4-ec70202c8cdf","Type":"ContainerStarted","Data":"f48f3a48157f783b9f28a75f12a4e2f6551c62f71bff00ff6f521f0d158846a5"} Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.762505 5021 scope.go:117] "RemoveContainer" containerID="d977c5fa121c11f08d5bce3d668c5a9e751eb49c2098d3b8f863976c805e8275" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.765241 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8a8a56-5cdf-4e01-863f-30d235d24321" path="/var/lib/kubelet/pods/9b8a8a56-5cdf-4e01-863f-30d235d24321/volumes" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.766049 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.766087 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.788425 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.790649 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.797235 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.799901 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.804640 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.910122 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.910328 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.910389 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.910461 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:08 crc kubenswrapper[5021]: I0121 15:48:08.910546 5021 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctsq9\" (UniqueName: \"kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012058 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012124 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012165 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012246 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012287 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctsq9\" (UniqueName: \"kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.012627 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.018488 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.021563 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.022529 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0" Jan 21 15:48:09 crc 
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.034060 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctsq9\" (UniqueName: \"kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9\") pod \"nova-metadata-0\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " pod="openstack/nova-metadata-0"
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.124175 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.584481 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 21 15:48:09 crc kubenswrapper[5021]: W0121 15:48:09.593234 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod110a1110_f52a_40e4_8402_166be87650a8.slice/crio-65731795182e43d75169509b5c1183611d7f9663ce1fd993666fb3255554e661 WatchSource:0}: Error finding container 65731795182e43d75169509b5c1183611d7f9663ce1fd993666fb3255554e661: Status 404 returned error can't find the container with id 65731795182e43d75169509b5c1183611d7f9663ce1fd993666fb3255554e661
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.698640 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerStarted","Data":"65731795182e43d75169509b5c1183611d7f9663ce1fd993666fb3255554e661"}
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.702038 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0787e96e-5c19-467d-9ad4-ec70202c8cdf","Type":"ContainerStarted","Data":"16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23"}
Jan 21 15:48:09 crc kubenswrapper[5021]: I0121 15:48:09.731112 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.731087944 podStartE2EDuration="2.731087944s" podCreationTimestamp="2026-01-21 15:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:48:09.717552047 +0000 UTC m=+1431.252665946" watchObservedRunningTime="2026-01-21 15:48:09.731087944 +0000 UTC m=+1431.266201833"
Jan 21 15:48:10 crc kubenswrapper[5021]: I0121 15:48:10.713761 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerStarted","Data":"feae471e1c50172422c6097ccee57bce6ab91a98c54d1223f046f0f30e158360"}
Jan 21 15:48:10 crc kubenswrapper[5021]: I0121 15:48:10.714167 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerStarted","Data":"f0fe818e9e2a058656b7e5d772bd6c84da9de9b5ca099a5c958f3ab93f7c5392"}
Jan 21 15:48:10 crc kubenswrapper[5021]: I0121 15:48:10.743705 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.743684329 podStartE2EDuration="2.743684329s" podCreationTimestamp="2026-01-21 15:48:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-21 15:48:10.733278614 +0000 UTC m=+1432.268392513" watchObservedRunningTime="2026-01-21 15:48:10.743684329 +0000 UTC m=+1432.278798218"
Jan 21 15:48:10 crc kubenswrapper[5021]: I0121 15:48:10.752542 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23436463-da03-498e-90e9-a224f2fa1600" path="/var/lib/kubelet/pods/23436463-da03-498e-90e9-a224f2fa1600/volumes"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.059219 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"]
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.061773 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.095433 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"]
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.174044 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.174255 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfs5k\" (UniqueName: \"kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.174294 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.276231 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfs5k\" (UniqueName: \"kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.276290 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.276411 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.276971 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.277560 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.298407 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfs5k\" (UniqueName: \"kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k\") pod \"redhat-operators-htpsq\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.396225 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:12 crc kubenswrapper[5021]: I0121 15:48:12.879188 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"]
Jan 21 15:48:13 crc kubenswrapper[5021]: I0121 15:48:13.136611 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 21 15:48:13 crc kubenswrapper[5021]: E0121 15:48:13.533717 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47ec38ba_efc2_48a2_b9c5_04ba853a8691.slice/crio-conmon-b62f3c98443953b412d9b61259124a212fc11ad27cc0955aaff8ed76fd0ab856.scope\": RecentStats: unable to find data in memory cache]"
Jan 21 15:48:13 crc kubenswrapper[5021]: I0121 15:48:13.748219 5021 generic.go:334] "Generic (PLEG): container finished" podID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerID="b62f3c98443953b412d9b61259124a212fc11ad27cc0955aaff8ed76fd0ab856" exitCode=0
Jan 21 15:48:13 crc kubenswrapper[5021]: I0121 15:48:13.748361 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerDied","Data":"b62f3c98443953b412d9b61259124a212fc11ad27cc0955aaff8ed76fd0ab856"}
Jan 21 15:48:13 crc kubenswrapper[5021]: I0121 15:48:13.748563 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerStarted","Data":"21ba85b4391fcb6c3174cb38006866ae7c76ea78b3f192db7b2ed8a7e973d0f1"}
Jan 21 15:48:14 crc kubenswrapper[5021]: I0121 15:48:14.125219 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 21 15:48:14 crc kubenswrapper[5021]: I0121 15:48:14.125292 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 21 15:48:16 crc kubenswrapper[5021]: I0121 15:48:16.009105 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 21 15:48:16 crc kubenswrapper[5021]: I0121 15:48:16.010127 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 21 15:48:17 crc kubenswrapper[5021]: I0121 15:48:17.022052 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 21 15:48:17 crc kubenswrapper[5021]: I0121 15:48:17.022089 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 21 15:48:18 crc kubenswrapper[5021]: I0121 15:48:18.136109 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 21 15:48:18 crc kubenswrapper[5021]: I0121 15:48:18.168133 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 21 15:48:18 crc kubenswrapper[5021]: I0121 15:48:18.837370 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 21 15:48:19 crc kubenswrapper[5021]: I0121 15:48:19.124881 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 21 15:48:19 crc kubenswrapper[5021]: I0121 15:48:19.124941 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 21 15:48:20 crc kubenswrapper[5021]: I0121 15:48:20.142281 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 21 15:48:20 crc kubenswrapper[5021]: I0121 15:48:20.142315 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 21 15:48:20 crc kubenswrapper[5021]: I0121 15:48:20.823213 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerStarted","Data":"24df4912a140964b1e0d41fa753cf688016ba7e3f96a6f1271116b4a363e6c33"}
Jan 21 15:48:22 crc kubenswrapper[5021]: I0121 15:48:22.766025 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.014288 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.014821 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.015173 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.015195 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.022463 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 21 15:48:26 crc kubenswrapper[5021]: I0121 15:48:26.022549 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
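The startup-probe failures above are plain HTTP GETs timing out: nova-api-0's probe targets https://10.217.0.205:8774/ (nova-metadata-0's targets https://10.217.0.207:8775/) and gives up after the probe timeout, producing the net/http "Client.Timeout exceeded while awaiting headers" output until the services warm up and the probes flip to started/ready at 15:48:26. A minimal sketch of such a check, assuming the endpoint from the log and the 1s Kubernetes default for timeoutSeconds:

// GET the probe endpoint with a hard client timeout, as an HTTP probe does.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 1 * time.Second, // exceeded -> "Client.Timeout exceeded while awaiting headers"
		Transport: &http.Transport{
			// HTTPS-scheme probes do not verify the serving certificate.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://10.217.0.205:8774/") // endpoint from the log
	if err != nil {
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	// Status codes in [200, 400) count as probe success.
	fmt.Println("probe status:", resp.Status, "success:", resp.StatusCode >= 200 && resp.StatusCode < 400)
}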
Jan 21 15:48:27 crc kubenswrapper[5021]: I0121 15:48:27.617120 5021 patch_prober.go:28] interesting pod/router-default-5444994796-4459k container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 21 15:48:27 crc kubenswrapper[5021]: I0121 15:48:27.617397 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-4459k" podUID="c490e95d-e462-45b2-8352-9603283319e1" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 21 15:48:27 crc kubenswrapper[5021]: I0121 15:48:27.891330 5021 generic.go:334] "Generic (PLEG): container finished" podID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerID="24df4912a140964b1e0d41fa753cf688016ba7e3f96a6f1271116b4a363e6c33" exitCode=0
Jan 21 15:48:27 crc kubenswrapper[5021]: I0121 15:48:27.892070 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerDied","Data":"24df4912a140964b1e0d41fa753cf688016ba7e3f96a6f1271116b4a363e6c33"}
Jan 21 15:48:29 crc kubenswrapper[5021]: I0121 15:48:29.132723 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 21 15:48:29 crc kubenswrapper[5021]: I0121 15:48:29.135182 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 21 15:48:29 crc kubenswrapper[5021]: I0121 15:48:29.138816 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 21 15:48:29 crc kubenswrapper[5021]: I0121 15:48:29.912101 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 21 15:48:31 crc kubenswrapper[5021]: I0121 15:48:31.926057 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerStarted","Data":"a3133938f14e8324e72398c04b176cec55efcab2ec1daa967c36bab3517e94d2"}
Jan 21 15:48:31 crc kubenswrapper[5021]: I0121 15:48:31.953826 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-htpsq" podStartSLOduration=2.678607842 podStartE2EDuration="19.953807207s" podCreationTimestamp="2026-01-21 15:48:12 +0000 UTC" firstStartedPulling="2026-01-21 15:48:13.751411137 +0000 UTC m=+1435.286525026" lastFinishedPulling="2026-01-21 15:48:31.026610502 +0000 UTC m=+1452.561724391" observedRunningTime="2026-01-21 15:48:31.945467856 +0000 UTC m=+1453.480581745" watchObservedRunningTime="2026-01-21 15:48:31.953807207 +0000 UTC m=+1453.488921096"
Jan 21 15:48:32 crc kubenswrapper[5021]: I0121 15:48:32.396703 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-htpsq"
Jan 21 15:48:32 crc kubenswrapper[5021]: I0121 15:48:32.396792 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-htpsq"
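The tracker entry above shows why podStartSLOduration (2.678607842s) and podStartE2EDuration (19.953807207s) diverge for redhat-operators-htpsq when they were identical for the nova pods: the SLO figure excludes time spent pulling images. Checking the arithmetic from the logged timestamps:

// SLO = E2E - (lastFinishedPulling - firstStartedPulling), using the values
// printed by pod_startup_latency_tracker.go in the entry above.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-21 15:48:12 +0000 UTC")
	firstPull := parse("2026-01-21 15:48:13.751411137 +0000 UTC")
	lastPull := parse("2026-01-21 15:48:31.026610502 +0000 UTC")
	running := parse("2026-01-21 15:48:31.953807207 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println("E2E:", e2e) // 19.953807207s
	fmt.Println("SLO:", slo) // 2.678607842s, matching the log
}

For the nova pods both pull timestamps were the zero value (no image was pulled), so SLO and E2E collapse to the same number: observedRunningTime minus podCreationTimestamp, e.g. 15:48:07.70339243 minus 15:48:05 = 2.70339243s for nova-api-0.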
output=< Jan 21 15:48:33 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 15:48:33 crc kubenswrapper[5021]: > Jan 21 15:48:42 crc kubenswrapper[5021]: I0121 15:48:42.449462 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-htpsq" Jan 21 15:48:42 crc kubenswrapper[5021]: I0121 15:48:42.507970 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-htpsq" Jan 21 15:48:43 crc kubenswrapper[5021]: I0121 15:48:43.251525 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"] Jan 21 15:48:44 crc kubenswrapper[5021]: I0121 15:48:44.070424 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-htpsq" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="registry-server" containerID="cri-o://a3133938f14e8324e72398c04b176cec55efcab2ec1daa967c36bab3517e94d2" gracePeriod=2 Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.094514 5021 generic.go:334] "Generic (PLEG): container finished" podID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerID="a3133938f14e8324e72398c04b176cec55efcab2ec1daa967c36bab3517e94d2" exitCode=0 Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.094658 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerDied","Data":"a3133938f14e8324e72398c04b176cec55efcab2ec1daa967c36bab3517e94d2"} Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.095471 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-htpsq" event={"ID":"47ec38ba-efc2-48a2-b9c5-04ba853a8691","Type":"ContainerDied","Data":"21ba85b4391fcb6c3174cb38006866ae7c76ea78b3f192db7b2ed8a7e973d0f1"} Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.095527 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21ba85b4391fcb6c3174cb38006866ae7c76ea78b3f192db7b2ed8a7e973d0f1" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.116685 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-htpsq" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.239634 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfs5k\" (UniqueName: \"kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k\") pod \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.239710 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content\") pod \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.239836 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities\") pod \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\" (UID: \"47ec38ba-efc2-48a2-b9c5-04ba853a8691\") " Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.240926 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities" (OuterVolumeSpecName: "utilities") pod "47ec38ba-efc2-48a2-b9c5-04ba853a8691" (UID: "47ec38ba-efc2-48a2-b9c5-04ba853a8691"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.252811 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k" (OuterVolumeSpecName: "kube-api-access-hfs5k") pod "47ec38ba-efc2-48a2-b9c5-04ba853a8691" (UID: "47ec38ba-efc2-48a2-b9c5-04ba853a8691"). InnerVolumeSpecName "kube-api-access-hfs5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.342200 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.342645 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfs5k\" (UniqueName: \"kubernetes.io/projected/47ec38ba-efc2-48a2-b9c5-04ba853a8691-kube-api-access-hfs5k\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.373806 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47ec38ba-efc2-48a2-b9c5-04ba853a8691" (UID: "47ec38ba-efc2-48a2-b9c5-04ba853a8691"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:45 crc kubenswrapper[5021]: I0121 15:48:45.444618 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47ec38ba-efc2-48a2-b9c5-04ba853a8691-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:46 crc kubenswrapper[5021]: I0121 15:48:46.113743 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-htpsq" Jan 21 15:48:46 crc kubenswrapper[5021]: I0121 15:48:46.149427 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"] Jan 21 15:48:46 crc kubenswrapper[5021]: I0121 15:48:46.158871 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-htpsq"] Jan 21 15:48:46 crc kubenswrapper[5021]: I0121 15:48:46.750295 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" path="/var/lib/kubelet/pods/47ec38ba-efc2-48a2-b9c5-04ba853a8691/volumes" Jan 21 15:48:49 crc kubenswrapper[5021]: I0121 15:48:49.824313 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 21 15:48:49 crc kubenswrapper[5021]: I0121 15:48:49.825047 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" containerName="openstackclient" containerID="cri-o://f5e3f741e90cddcaa56c488a9ce56cfd5d36717ab8f8f3d4ee72791c52b6336c" gracePeriod=2 Jan 21 15:48:49 crc kubenswrapper[5021]: I0121 15:48:49.840970 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.215965 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.216482 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="extract-utilities" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216505 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="extract-utilities" Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.216523 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" containerName="openstackclient" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216534 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" containerName="openstackclient" Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.216550 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="registry-server" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216559 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="registry-server" Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.216575 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="extract-content" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216584 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="extract-content" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216817 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="47ec38ba-efc2-48a2-b9c5-04ba853a8691" containerName="registry-server" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.216848 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" containerName="openstackclient" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.217640 
5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.233540 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.255205 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.308316 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.342628 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.343842 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.350429 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gzpn\" (UniqueName: \"kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.350646 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.360210 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.363478 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.414688 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.421589 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.456604 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gzpn\" (UniqueName: \"kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.456867 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.456969 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxvl9\" (UniqueName: \"kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.457061 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.457104 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.458666 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.458889 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 21 15:48:50 crc kubenswrapper[5021]: E0121 15:48:50.458956 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data podName:b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b nodeName:}" failed. No retries permitted until 2026-01-21 15:48:50.958938986 +0000 UTC m=+1472.494052875 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data") pod "rabbitmq-server-0" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b") : configmap "rabbitmq-config-data" not found Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.461974 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.521700 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gzpn\" (UniqueName: \"kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn\") pod \"barbican-10e4-account-create-update-ngtt5\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.563588 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.565003 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxvl9\" (UniqueName: \"kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.565129 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.565234 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.565346 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74m6d\" (UniqueName: \"kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.566433 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.578322 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-10e4-account-create-update-vpspz"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.612834 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-10e4-account-create-update-vpspz"] Jan 21 15:48:50 crc 
kubenswrapper[5021]: I0121 15:48:50.670504 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.670621 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74m6d\" (UniqueName: \"kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.671620 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.700873 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxvl9\" (UniqueName: \"kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9\") pod \"placement-0608-account-create-update-m8cb5\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.732589 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.734208 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.737139 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.773163 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da995ed3-1cf2-4f8e-ba7f-9780821e31f3" path="/var/lib/kubelet/pods/da995ed3-1cf2-4f8e-ba7f-9780821e31f3/volumes" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.774324 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjxjj\" (UniqueName: \"kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.774524 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.780888 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74m6d\" (UniqueName: \"kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d\") pod \"root-account-create-update-mpfp7\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.785217 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0608-account-create-update-rgw6j"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.785566 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.804006 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-0608-account-create-update-rgw6j"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.834075 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.850019 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-t7cgc"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.862513 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-t7cgc"] Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.882508 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjxjj\" (UniqueName: \"kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.882621 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:50 crc kubenswrapper[5021]: I0121 15:48:50.895777 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.908397 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-995xv"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.924979 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-48eb-account-create-update-dcvwx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.933138 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-995xv"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.939690 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjxjj\" (UniqueName: \"kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj\") pod \"nova-api-48eb-account-create-update-scv9x\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.957986 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-48eb-account-create-update-dcvwx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.971062 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:50.995649 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:50.995772 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data podName:b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b nodeName:}" failed. No retries permitted until 2026-01-21 15:48:51.995742644 +0000 UTC m=+1473.530856553 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data") pod "rabbitmq-server-0" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b") : configmap "rabbitmq-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.999349 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:50.999717 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="ovn-northd" containerID="cri-o://894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.000221 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="openstack-network-exporter" containerID="cri-o://55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.125440 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.128411 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.142429 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.143745 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.235749 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.235818 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpmbl\" (UniqueName: \"kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.265978 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.329381 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" probeResult="failure" output="command timed out" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.332292 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" probeResult="failure" output="command timed out" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.339825 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.339870 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpmbl\" (UniqueName: \"kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.340901 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.404889 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.406175 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.416849 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.425294 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpmbl\" (UniqueName: \"kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl\") pod \"nova-cell1-4f73-account-create-update-c2q4j\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.446130 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxtxw\" (UniqueName: \"kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.446216 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.460867 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.510013 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-tcc9q"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.550258 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.550408 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxtxw\" (UniqueName: \"kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.551335 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.551658 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.599142 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.634084 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-gm6fx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.648080 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxtxw\" (UniqueName: \"kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw\") pod \"nova-cell0-1670-account-create-update-5jkx8\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.681032 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-8jhv7"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.708994 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-tcc9q"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.754670 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-8jhv7"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:51.759865 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:51.759941 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data podName:2dff28e1-6d0f-4a7d-8fcf-0edf26e63825 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:52.259924388 +0000 UTC m=+1473.795038287 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data") pod "rabbitmq-cell1-server-0" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825") : configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.760750 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.789274 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-gm6fx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.824985 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.825648 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="openstack-network-exporter" containerID="cri-o://be5a47b27b20c5b0ab887ff6ead17e4f4a7b05b5ba87488bbbbe349813177d79" gracePeriod=300 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.880957 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-rpbpq"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.898723 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-x6jrz"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.925031 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-x6jrz"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.937060 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-rpbpq"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.973029 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:51.973679 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="openstack-network-exporter" containerID="cri-o://00dc1a70c5582842a5b18750882608ff55e28ecac0f1421cc5d2e9d1a3cd1b00" gracePeriod=300 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.003150 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.003389 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="dnsmasq-dns" containerID="cri-o://2fc2eb7f65b6d060d0dd9c5922dc5172b7314bb18a053569c709b7586f92be06" gracePeriod=10 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.016087 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.016366 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="cinder-scheduler" containerID="cri-o://437d3b10fb0fb297b844f9dbf1d4a83367b420ef57cd073914c0525d5c579f5d" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.016501 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="probe" containerID="cri-o://0771568118c8d7a6aa5ededbd663532cb393bc36afe29638946df189c2108dfc" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:52.065793 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 
15:48:52.065892 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data podName:b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b nodeName:}" failed. No retries permitted until 2026-01-21 15:48:54.0658735 +0000 UTC m=+1475.600987389 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data") pod "rabbitmq-server-0" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b") : configmap "rabbitmq-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.090031 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-fxqns"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.110045 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.110287 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-c764x" podUID="0018f301-49d2-4884-abf4-23b4687de8fd" containerName="openstack-network-exporter" containerID="cri-o://68cab115728d1c091f3a993f8259c122399204e30f7f18e3a2170fa8fd30b98f" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.142974 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-fxqns"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.185141 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-86fx8"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.190069 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.211198 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.243690 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-86fx8"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.257975 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.258239 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api-log" containerID="cri-o://acb12fa4b5f061852748af753502ce94371a4a867002ad11d238b65b996be3e7" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.258571 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api" containerID="cri-o://34073a8e93f07e196867b52269b44be932eaae6b829c7faf37daff1fefaef5dd" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:52.270927 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:52.271004 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data podName:2dff28e1-6d0f-4a7d-8fcf-0edf26e63825 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:53.270986106 +0000 UTC m=+1474.806099995 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data") pod "rabbitmq-cell1-server-0" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825") : configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.281315 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-mqzs9"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.300088 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-mqzs9"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.310286 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.311045 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-server" containerID="cri-o://5801c40336a7430220f5050e7b4c6fc8997538d48d91daa78538847e03eb5b9c" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.311578 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="swift-recon-cron" containerID="cri-o://1177abe509b1fc7d36535c70f37ab796f728a73afa2630b7247a37b263d96673" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.311859 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="rsync" containerID="cri-o://d0d02b0697c6f0cdbe32b4c15779fa2b7fb9db8ad0d4beee7917a8570d9ce131" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.311928 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-expirer" containerID="cri-o://0b98874dda34c3adb9708dfa4fddca97d42d24280001e6ca51c29fdf4e04e366" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.311977 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-updater" containerID="cri-o://21f76b84c77562932f1ebb5a263ddfe5a755ae6258ad955ca59a13307d229d84" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312028 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-auditor" containerID="cri-o://286ee38dedf5ca3a893d36e49ab99761202c13f3e2d7786385e279604c029ca3" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312066 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-replicator" containerID="cri-o://a89b180c0135b475ed1ba2315e698962a948ee0a359d0c123f97f5bef6cca782" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312100 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-server" containerID="cri-o://70645c7bc51255ddd66eef76b13c0c8daa2f66c30285a645c871c40c0117099a" gracePeriod=30 Jan 21 
15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312144 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-updater" containerID="cri-o://1ca716b9f11f9eb3707f3cd9724e75ee4eb6224c4c1e84903f22f728f45b5a6e" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312178 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-auditor" containerID="cri-o://ebcb5861aa10209409b721ea6e382ae9a04e2327d3329449b46709721ed4a126" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312237 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-replicator" containerID="cri-o://de92f672063435b72b37aaebf43b6130f273d9c86bb2fbfd7c96ca15e567638a" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312280 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-server" containerID="cri-o://6107623cd8f4072bc502c561748852925819d887d3f75272057a2e95b4ad1df7" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312311 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-reaper" containerID="cri-o://26edaec702317ca592975b15ba32e49f2dbd21f92807d5d36fce7823804ed53c" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312340 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-auditor" containerID="cri-o://0d1d92941497d1a0e50ff5085e977bdf5928704f84bf870731efcc8fcb1d2f1c" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.312372 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-replicator" containerID="cri-o://c5661f129fdffc8a3bd461399ca660bc553970556f3c6af116113c924c302646" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.359357 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-rl28t"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.384318 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-rl28t"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.402400 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e360fedf-3856-4f89-980d-f5f282e2f696/ovn-northd/0.log" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.402454 5021 generic.go:334] "Generic (PLEG): container finished" podID="e360fedf-3856-4f89-980d-f5f282e2f696" containerID="55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168" exitCode=2 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.402472 5021 generic.go:334] "Generic (PLEG): container finished" podID="e360fedf-3856-4f89-980d-f5f282e2f696" containerID="894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 
15:48:52.402500 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerDied","Data":"55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.402547 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerDied","Data":"894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.405964 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-44zpx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.443369 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-44zpx"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.493219 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-8c4f-account-create-update-jb2q4"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.507930 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.508247 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-log" containerID="cri-o://13e437facb9eed154cb3a1fc466a26799000944b7d4e3a944a35585bf94ce10b" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.508415 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-httpd" containerID="cri-o://6b8005ac26237642083ae21d321912d31a41bc53e8cc8714923e3a28c95e2695" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.525993 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-8c4f-account-create-update-jb2q4"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.551970 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.552219 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-log" containerID="cri-o://d29d9648241688d22882bb1ef26e5b75f7e75ce105b478819e5cb0b36d9eaa34" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.552618 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-httpd" containerID="cri-o://20a7c05d680426c518dcf812d8d7a9481aa09f5c574f8ae028a68e0fbb6c1a5e" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.594692 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c795c5585-m9bzp"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.594995 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-c795c5585-m9bzp" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-api" containerID="cri-o://40d25da6134bf8a7e089cfd3c065d27e8bfaa57441da4124734dff449dcf1ca3" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 
15:48:52.595456 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-c795c5585-m9bzp" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-httpd" containerID="cri-o://f2a67cef1ba2db3bd18f3dfff338b23523c5e1f0086e64b2d5a4f0a2940f8a1e" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.630991 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-fba3-account-create-update-mmtzp"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.652723 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-fba3-account-create-update-mmtzp"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.677681 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5zlsq"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.690166 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.690509 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-57f8ddbc76-dgfjh" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-log" containerID="cri-o://568335520c5abb99b1d9dd2a7aa68f565adae7b72f51cf91144f9ac64fbbdece" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.691050 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-57f8ddbc76-dgfjh" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-api" containerID="cri-o://ed0b25896af93d99f78d4d4db9ef15750f9683c1f7556443210e10912e9c3954" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.701146 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5zlsq"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.715757 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.723042 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-dkzj5"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.731930 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.732155 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7bbf467d99-62cpf" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-httpd" containerID="cri-o://16670df6a896fe5b4ccec437b88876c2007832ffa1ddab3c5a874249577f502d" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.732423 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7bbf467d99-62cpf" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-server" containerID="cri-o://21d2a742eb18fb8403f215dcc111b5047c3d3f27852bd27e95a06b29a951b3cd" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.752156 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02027ced-d71f-420b-9f26-10adafa52051" path="/var/lib/kubelet/pods/02027ced-d71f-420b-9f26-10adafa52051/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.752580 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="62419df2-740b-473d-8fff-9ea018a268e5" 
containerName="ovsdbserver-nb" containerID="cri-o://11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" gracePeriod=300 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.752774 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03d8cc6b-a5fc-4f8c-9b94-a3d54114278f" path="/var/lib/kubelet/pods/03d8cc6b-a5fc-4f8c-9b94-a3d54114278f/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.754695 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="085747f2-d183-4dc9-89dc-91a732a1d6b0" path="/var/lib/kubelet/pods/085747f2-d183-4dc9-89dc-91a732a1d6b0/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.755328 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bfd21fb-7d79-4523-9626-3fcc93ff1db3" path="/var/lib/kubelet/pods/3bfd21fb-7d79-4523-9626-3fcc93ff1db3/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.756191 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4601017a-6691-4486-8bc0-e469284ec4e2" path="/var/lib/kubelet/pods/4601017a-6691-4486-8bc0-e469284ec4e2/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.756763 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52f108a9-a567-4074-88db-05c8c2feea41" path="/var/lib/kubelet/pods/52f108a9-a567-4074-88db-05c8c2feea41/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.757557 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b6d6a49-2772-4d64-a665-618dfc7e2035" path="/var/lib/kubelet/pods/6b6d6a49-2772-4d64-a665-618dfc7e2035/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.758790 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="898b9fde-4ac9-449b-9ede-ea24a67e38e9" path="/var/lib/kubelet/pods/898b9fde-4ac9-449b-9ede-ea24a67e38e9/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.759427 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e81a613-3013-46ca-9964-ad6c7deea2b2" path="/var/lib/kubelet/pods/8e81a613-3013-46ca-9964-ad6c7deea2b2/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.760821 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="ovsdbserver-sb" containerID="cri-o://c2577e6926c2b20d5997e13950dc6cfaeb16569002c47c4ba9f66d3aaaed7055" gracePeriod=300 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.772460 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f97c362-e247-4151-b007-2b3006b50488" path="/var/lib/kubelet/pods/8f97c362-e247-4151-b007-2b3006b50488/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.773576 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81" path="/var/lib/kubelet/pods/a7d0dd68-feb3-44e7-8f06-a94cc8ce3c81/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.774210 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9b18b31-e098-4b42-be98-c3d6357905d1" path="/var/lib/kubelet/pods/a9b18b31-e098-4b42-be98-c3d6357905d1/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.774807 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd0690b4-983b-42e0-91d3-fff22e7938c2" path="/var/lib/kubelet/pods/bd0690b4-983b-42e0-91d3-fff22e7938c2/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.781852 5021 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c39660b0-c6d5-4b6e-95b8-12b8fbf38a14" path="/var/lib/kubelet/pods/c39660b0-c6d5-4b6e-95b8-12b8fbf38a14/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.782781 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c693b8ce-a34d-4a4b-b6e8-1495764299d7" path="/var/lib/kubelet/pods/c693b8ce-a34d-4a4b-b6e8-1495764299d7/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.783413 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb49e3b7-78e5-4094-9bf0-d25f350d70a2" path="/var/lib/kubelet/pods/cb49e3b7-78e5-4094-9bf0-d25f350d70a2/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.787352 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e369fc7c-044b-47cc-964f-601d7c06f150" path="/var/lib/kubelet/pods/e369fc7c-044b-47cc-964f-601d7c06f150/volumes" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788310 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-wfhkk"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788337 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-a8a5-account-create-update-x4n5x"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788350 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-r4jkd"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788361 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788394 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-dkzj5"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788415 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-wfhkk"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.788431 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-a8a5-account-create-update-x4n5x"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.798465 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-r4jkd"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.811823 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.821379 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.832959 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.833241 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log" containerID="cri-o://f0fe818e9e2a058656b7e5d772bd6c84da9de9b5ca099a5c958f3ab93f7c5392" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.833646 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" containerID="cri-o://feae471e1c50172422c6097ccee57bce6ab91a98c54d1223f046f0f30e158360" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.849973 5021 kubelet.go:2437] "SyncLoop 
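The run of "Cleaned up orphaned pod volumes dir" lines above is the kubelet's orphan sweep: per-pod directories under /var/lib/kubelet/pods whose UID no longer matches any active pod are removed on the next housekeeping pass. A sketch for confirming one cleanup on the node, assuming host shell access (UID copied from the log):

    # should fail with ENOENT once the sweep has run
    sudo ls /var/lib/kubelet/pods/02027ced-d71f-420b-9f26-10adafa52051/volumes \
      || echo "volumes dir removed"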
DELETE" source="api" pods=["openstack/nova-cell0-db-create-gd72h"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.875919 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-gd72h"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.887966 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.888222 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener-log" containerID="cri-o://233265452bf90deac8f8558f1e900daf7f05dda82bc41b72a507e02c47ad409b" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.888347 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener" containerID="cri-o://7673fa928a9d34d9093a07ad100e4f08c6bac6b0eb9a73dd956508ac3f6d49ca" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.897002 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.907627 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-pzb9h"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.912145 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" containerID="cri-o://d55fd3560f293f7b3d5438cf1c04fd0d68375dd2c61252e90dab6e4eb53445b2" gracePeriod=604800 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.915194 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.915511 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-log" containerID="cri-o://d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.915850 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-api" containerID="cri-o://bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.927487 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.927790 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker-log" containerID="cri-o://fcc7d2e930abd6b478f82fcfe23ce92e362592301c9c8fb4f5dea9d2b2bedb88" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.927852 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker" 
containerID="cri-o://e1e5425a7a11ce797c6d259dce5739f59a0a9337b9e52538e7943644bd38dc3e" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.936943 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.937188 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log" containerID="cri-o://b0be39c8ae52be02d5990fbd6de2c149adc18e9a2711ad760bb76af11a809a0e" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.937314 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api" containerID="cri-o://3e2ad39675705ef9c70fa28adb97c9a01666ba00b4202ad69b0fcb8f9b4aba7d" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.944840 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": EOF" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.945030 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-pzb9h"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.947047 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": EOF" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.953072 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-ztp85"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.960159 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:52.969382 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.036086 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-ztp85"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.037096 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" cmd=["/usr/bin/pidof","ovsdb-server"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.051688 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" cmd=["/usr/bin/pidof","ovsdb-server"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.060105 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.060389 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" 
podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.061151 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" cmd=["/usr/bin/pidof","ovsdb-server"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.061208 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="ovsdbserver-nb" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.070054 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.092070 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.102055 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-s49kl"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.109418 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.109648 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerName="nova-cell1-conductor-conductor" containerID="cri-o://c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.118600 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-s49kl"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.126540 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.140563 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-p4kbs"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.148384 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.148628 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" containerName="nova-cell0-conductor-conductor" containerID="cri-o://fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" gracePeriod=30 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.183197 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.183457 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerName="nova-scheduler-scheduler" containerID="cri-o://16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" gracePeriod=30 Jan 21 15:48:53 crc 
kubenswrapper[5021]: I0121 15:48:53.197253 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" containerID="cri-o://44ab8303cf36ed1256a72700def0f8fdb1a1e4a5f2dd2a14ca80a744759920ec" gracePeriod=604800
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.252459 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="galera" containerID="cri-o://de590a6c44256e84fcc664627be518c9fb2c460d8c59cdb3123cc99eebe47520" gracePeriod=30
Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.325545 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.325611 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data podName:2dff28e1-6d0f-4a7d-8fcf-0edf26e63825 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:55.325596371 +0000 UTC m=+1476.860710260 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data") pod "rabbitmq-cell1-server-0" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825") : configmap "rabbitmq-cell1-config-data" not found
Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.358828 5021 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 21 15:48:53 crc kubenswrapper[5021]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 21 15:48:53 crc kubenswrapper[5021]: + source /usr/local/bin/container-scripts/functions
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNBridge=br-int
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNRemote=tcp:localhost:6642
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNEncapType=geneve
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNAvailabilityZones=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ EnableChassisAsGateway=true
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ PhysicalNetworks=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNHostName=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ ovs_dir=/var/lib/openvswitch
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + cleanup_ovsdb_server_semaphore
Jan 21 15:48:53 crc kubenswrapper[5021]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 21 15:48:53 crc kubenswrapper[5021]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-bk98m" message=<
Jan 21 15:48:53 crc kubenswrapper[5021]: Exiting ovsdb-server (5) [ OK ]
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 21 15:48:53 crc kubenswrapper[5021]: + source /usr/local/bin/container-scripts/functions
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNBridge=br-int
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNRemote=tcp:localhost:6642
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNEncapType=geneve
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNAvailabilityZones=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ EnableChassisAsGateway=true
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ PhysicalNetworks=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNHostName=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ ovs_dir=/var/lib/openvswitch
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + cleanup_ovsdb_server_semaphore
Jan 21 15:48:53 crc kubenswrapper[5021]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 21 15:48:53 crc kubenswrapper[5021]: >
Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.358872 5021 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 21 15:48:53 crc kubenswrapper[5021]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 21 15:48:53 crc kubenswrapper[5021]: + source /usr/local/bin/container-scripts/functions
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNBridge=br-int
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNRemote=tcp:localhost:6642
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNEncapType=geneve
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNAvailabilityZones=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ EnableChassisAsGateway=true
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ PhysicalNetworks=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ OVNHostName=
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ ovs_dir=/var/lib/openvswitch
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 21 15:48:53 crc kubenswrapper[5021]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + sleep 0.5
Jan 21 15:48:53 crc kubenswrapper[5021]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 21 15:48:53 crc kubenswrapper[5021]: + cleanup_ovsdb_server_semaphore
Jan 21 15:48:53 crc kubenswrapper[5021]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 21 15:48:53 crc kubenswrapper[5021]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 21 15:48:53 crc kubenswrapper[5021]: > pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" containerID="cri-o://c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d"
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.358953 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" containerID="cri-o://c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" gracePeriod=29
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.415629 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-c764x_0018f301-49d2-4884-abf4-23b4687de8fd/openstack-network-exporter/0.log"
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.415670 5021 generic.go:334] "Generic (PLEG): container finished" podID="0018f301-49d2-4884-abf4-23b4687de8fd" containerID="68cab115728d1c091f3a993f8259c122399204e30f7f18e3a2170fa8fd30b98f" exitCode=2
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.415755 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-c764x" event={"ID":"0018f301-49d2-4884-abf4-23b4687de8fd","Type":"ContainerDied","Data":"68cab115728d1c091f3a993f8259c122399204e30f7f18e3a2170fa8fd30b98f"}
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427535 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="d0d02b0697c6f0cdbe32b4c15779fa2b7fb9db8ad0d4beee7917a8570d9ce131" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427558 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="286ee38dedf5ca3a893d36e49ab99761202c13f3e2d7786385e279604c029ca3" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427564 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="70645c7bc51255ddd66eef76b13c0c8daa2f66c30285a645c871c40c0117099a" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427571 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="ebcb5861aa10209409b721ea6e382ae9a04e2327d3329449b46709721ed4a126" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427577 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="de92f672063435b72b37aaebf43b6130f273d9c86bb2fbfd7c96ca15e567638a" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427583 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="0d1d92941497d1a0e50ff5085e977bdf5928704f84bf870731efcc8fcb1d2f1c" exitCode=0
Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427588 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="c5661f129fdffc8a3bd461399ca660bc553970556f3c6af116113c924c302646" exitCode=0
Jan 21 15:48:53 crc
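The PreStop trace above ends in exit code 137 = 128 + 9 (SIGKILL): stop-ovsdb-server.sh was still polling for the is_safe_to_stop_ovsdb_server semaphore when the hook's deadline expired, the runtime killed it, and the kubelet went on to kill the container with the remaining grace (gracePeriod=29 of the original 30). A sketch for inspecting the hook definition that produced this trace, assuming kubectl access:

    # show the preStop exec command on the ovsdb-server container
    kubectl -n openstack get pod ovn-controller-ovs-bk98m \
      -o jsonpath='{.spec.containers[?(@.name=="ovsdb-server")].lifecycle.preStop.exec.command}'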
kubenswrapper[5021]: I0121 15:48:53.427681 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"d0d02b0697c6f0cdbe32b4c15779fa2b7fb9db8ad0d4beee7917a8570d9ce131"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427708 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"286ee38dedf5ca3a893d36e49ab99761202c13f3e2d7786385e279604c029ca3"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427718 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"70645c7bc51255ddd66eef76b13c0c8daa2f66c30285a645c871c40c0117099a"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427727 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"ebcb5861aa10209409b721ea6e382ae9a04e2327d3329449b46709721ed4a126"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427736 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"de92f672063435b72b37aaebf43b6130f273d9c86bb2fbfd7c96ca15e567638a"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427745 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"0d1d92941497d1a0e50ff5085e977bdf5928704f84bf870731efcc8fcb1d2f1c"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.427755 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"c5661f129fdffc8a3bd461399ca660bc553970556f3c6af116113c924c302646"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.430534 5021 generic.go:334] "Generic (PLEG): container finished" podID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerID="16670df6a896fe5b4ccec437b88876c2007832ffa1ddab3c5a874249577f502d" exitCode=0 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.430726 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerDied","Data":"16670df6a896fe5b4ccec437b88876c2007832ffa1ddab3c5a874249577f502d"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.433467 5021 generic.go:334] "Generic (PLEG): container finished" podID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerID="2fc2eb7f65b6d060d0dd9c5922dc5172b7314bb18a053569c709b7586f92be06" exitCode=0 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.433525 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" event={"ID":"16b38c07-3cc7-45b6-9145-514af8206bdb","Type":"ContainerDied","Data":"2fc2eb7f65b6d060d0dd9c5922dc5172b7314bb18a053569c709b7586f92be06"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.437001 5021 generic.go:334] "Generic (PLEG): container finished" podID="110a1110-f52a-40e4-8402-166be87650a8" containerID="f0fe818e9e2a058656b7e5d772bd6c84da9de9b5ca099a5c958f3ab93f7c5392" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.437112 5021 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerDied","Data":"f0fe818e9e2a058656b7e5d772bd6c84da9de9b5ca099a5c958f3ab93f7c5392"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.439625 5021 generic.go:334] "Generic (PLEG): container finished" podID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerID="b0be39c8ae52be02d5990fbd6de2c149adc18e9a2711ad760bb76af11a809a0e" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.439684 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerDied","Data":"b0be39c8ae52be02d5990fbd6de2c149adc18e9a2711ad760bb76af11a809a0e"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.442151 5021 generic.go:334] "Generic (PLEG): container finished" podID="7369dbac-285b-4322-8322-41b1b450d199" containerID="0771568118c8d7a6aa5ededbd663532cb393bc36afe29638946df189c2108dfc" exitCode=0 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.442202 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerDied","Data":"0771568118c8d7a6aa5ededbd663532cb393bc36afe29638946df189c2108dfc"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.449657 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd" containerID="cri-o://c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" gracePeriod=29 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.452892 5021 generic.go:334] "Generic (PLEG): container finished" podID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerID="233265452bf90deac8f8558f1e900daf7f05dda82bc41b72a507e02c47ad409b" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.452964 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerDied","Data":"233265452bf90deac8f8558f1e900daf7f05dda82bc41b72a507e02c47ad409b"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.456375 5021 generic.go:334] "Generic (PLEG): container finished" podID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerID="acb12fa4b5f061852748af753502ce94371a4a867002ad11d238b65b996be3e7" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.456464 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerDied","Data":"acb12fa4b5f061852748af753502ce94371a4a867002ad11d238b65b996be3e7"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.461938 5021 generic.go:334] "Generic (PLEG): container finished" podID="0415e622-e0cf-4097-865a-a0970f2acc07" containerID="d29d9648241688d22882bb1ef26e5b75f7e75ce105b478819e5cb0b36d9eaa34" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.462019 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerDied","Data":"d29d9648241688d22882bb1ef26e5b75f7e75ce105b478819e5cb0b36d9eaa34"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.467268 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_62419df2-740b-473d-8fff-9ea018a268e5/ovsdbserver-nb/0.log" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.467399 5021 generic.go:334] "Generic (PLEG): container finished" podID="62419df2-740b-473d-8fff-9ea018a268e5" containerID="be5a47b27b20c5b0ab887ff6ead17e4f4a7b05b5ba87488bbbbe349813177d79" exitCode=2 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.467506 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerDied","Data":"be5a47b27b20c5b0ab887ff6ead17e4f4a7b05b5ba87488bbbbe349813177d79"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.469980 5021 generic.go:334] "Generic (PLEG): container finished" podID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerID="fcc7d2e930abd6b478f82fcfe23ce92e362592301c9c8fb4f5dea9d2b2bedb88" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.470051 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerDied","Data":"fcc7d2e930abd6b478f82fcfe23ce92e362592301c9c8fb4f5dea9d2b2bedb88"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.472122 5021 generic.go:334] "Generic (PLEG): container finished" podID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerID="13e437facb9eed154cb3a1fc466a26799000944b7d4e3a944a35585bf94ce10b" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.472203 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerDied","Data":"13e437facb9eed154cb3a1fc466a26799000944b7d4e3a944a35585bf94ce10b"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.476637 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_70ec8329-7d58-465c-9234-7e4543fe4538/ovsdbserver-sb/0.log" Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.476708 5021 generic.go:334] "Generic (PLEG): container finished" podID="70ec8329-7d58-465c-9234-7e4543fe4538" containerID="00dc1a70c5582842a5b18750882608ff55e28ecac0f1421cc5d2e9d1a3cd1b00" exitCode=2 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.476723 5021 generic.go:334] "Generic (PLEG): container finished" podID="70ec8329-7d58-465c-9234-7e4543fe4538" containerID="c2577e6926c2b20d5997e13950dc6cfaeb16569002c47c4ba9f66d3aaaed7055" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.476819 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerDied","Data":"00dc1a70c5582842a5b18750882608ff55e28ecac0f1421cc5d2e9d1a3cd1b00"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.476844 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerDied","Data":"c2577e6926c2b20d5997e13950dc6cfaeb16569002c47c4ba9f66d3aaaed7055"} Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.479681 5021 generic.go:334] "Generic (PLEG): container finished" podID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" containerID="f5e3f741e90cddcaa56c488a9ce56cfd5d36717ab8f8f3d4ee72791c52b6336c" exitCode=137 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.483705 5021 generic.go:334] "Generic (PLEG): container finished" 
podID="469c5416-c102-43c5-8801-502231a86238" containerID="568335520c5abb99b1d9dd2a7aa68f565adae7b72f51cf91144f9ac64fbbdece" exitCode=143 Jan 21 15:48:53 crc kubenswrapper[5021]: I0121 15:48:53.483725 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerDied","Data":"568335520c5abb99b1d9dd2a7aa68f565adae7b72f51cf91144f9ac64fbbdece"} Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.551047 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd is running failed: container process not found" containerID="894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.551686 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd is running failed: container process not found" containerID="894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.552209 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd is running failed: container process not found" containerID="894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 21 15:48:53 crc kubenswrapper[5021]: E0121 15:48:53.552240 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="ovn-northd" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.004292 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e360fedf-3856-4f89-980d-f5f282e2f696/ovn-northd/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.004671 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.010836 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-c764x_0018f301-49d2-4884-abf4-23b4687de8fd/openstack-network-exporter/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.010927 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.024256 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054478 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnnxm\" (UniqueName: \"kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054559 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054652 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054686 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054724 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m5kw\" (UniqueName: \"kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054755 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054808 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054866 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054898 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054957 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: 
\"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.054992 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs\") pod \"e360fedf-3856-4f89-980d-f5f282e2f696\" (UID: \"e360fedf-3856-4f89-980d-f5f282e2f696\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055026 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055044 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055056 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle\") pod \"0018f301-49d2-4884-abf4-23b4687de8fd\" (UID: \"0018f301-49d2-4884-abf4-23b4687de8fd\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055643 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config" (OuterVolumeSpecName: "config") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055804 5021 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055846 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0018f301-49d2-4884-abf4-23b4687de8fd-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.055877 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.056166 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts" (OuterVolumeSpecName: "scripts") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.056412 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config" (OuterVolumeSpecName: "config") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.056666 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.060961 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw" (OuterVolumeSpecName: "kube-api-access-2m5kw") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "kube-api-access-2m5kw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.079294 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm" (OuterVolumeSpecName: "kube-api-access-rnnxm") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "kube-api-access-rnnxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.091238 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.096208 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.140889 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157223 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle\") pod \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157371 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data\") pod \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157493 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs\") pod \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157547 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjvrt\" (UniqueName: \"kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt\") pod \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157578 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs\") pod \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\" (UID: \"7e624ae4-b10e-41c8-a09d-9b81cc213cf6\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.157801 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "e360fedf-3856-4f89-980d-f5f282e2f696" (UID: "e360fedf-3856-4f89-980d-f5f282e2f696"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158477 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "0018f301-49d2-4884-abf4-23b4687de8fd" (UID: "0018f301-49d2-4884-abf4-23b4687de8fd"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158582 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158601 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158615 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158629 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m5kw\" (UniqueName: \"kubernetes.io/projected/0018f301-49d2-4884-abf4-23b4687de8fd-kube-api-access-2m5kw\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158639 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0018f301-49d2-4884-abf4-23b4687de8fd-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158652 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158662 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e360fedf-3856-4f89-980d-f5f282e2f696-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158673 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e360fedf-3856-4f89-980d-f5f282e2f696-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158685 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.158697 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnnxm\" (UniqueName: \"kubernetes.io/projected/e360fedf-3856-4f89-980d-f5f282e2f696-kube-api-access-rnnxm\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: E0121 15:48:54.158674 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 21 15:48:54 crc kubenswrapper[5021]: E0121 15:48:54.158757 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data podName:b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b nodeName:}" failed. No retries permitted until 2026-01-21 15:48:58.158740569 +0000 UTC m=+1479.693854458 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data") pod "rabbitmq-server-0" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b") : configmap "rabbitmq-config-data" not found
Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.161935 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt" (OuterVolumeSpecName: "kube-api-access-xjvrt") pod "7e624ae4-b10e-41c8-a09d-9b81cc213cf6" (UID: "7e624ae4-b10e-41c8-a09d-9b81cc213cf6"). InnerVolumeSpecName "kube-api-access-xjvrt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.191659 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e624ae4-b10e-41c8-a09d-9b81cc213cf6" (UID: "7e624ae4-b10e-41c8-a09d-9b81cc213cf6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.211632 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data" (OuterVolumeSpecName: "config-data") pod "7e624ae4-b10e-41c8-a09d-9b81cc213cf6" (UID: "7e624ae4-b10e-41c8-a09d-9b81cc213cf6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.259099 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "7e624ae4-b10e-41c8-a09d-9b81cc213cf6" (UID: "7e624ae4-b10e-41c8-a09d-9b81cc213cf6"). InnerVolumeSpecName "nova-novncproxy-tls-certs".
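The MountVolume.SetUp failure above means the ConfigMap backing rabbitmq-server-0's config-data volume no longer exists, and the nestedpendingoperations line shows the kubelet's exponential backoff: no retry permitted for 4s, with the delay doubling on each repeated failure up to a cap. A minimal check from outside the node, assuming kubectl access and the openstack namespace from the log:

    # Does the ConfigMap the kubelet wants still exist? (It was deleted here.)
    kubectl -n openstack get configmap rabbitmq-config-data

    # The same failure is surfaced as FailedMount events on the pod.
    kubectl -n openstack describe pod rabbitmq-server-0 | grep -A2 FailedMount
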
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260288 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0018f301-49d2-4884-abf4-23b4687de8fd-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260318 5021 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260332 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjvrt\" (UniqueName: \"kubernetes.io/projected/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-kube-api-access-xjvrt\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260342 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260350 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.260426 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "7e624ae4-b10e-41c8-a09d-9b81cc213cf6" (UID: "7e624ae4-b10e-41c8-a09d-9b81cc213cf6"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.365843 5021 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e624ae4-b10e-41c8-a09d-9b81cc213cf6-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.475528 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7bbf467d99-62cpf" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.171:8080/healthcheck\": dial tcp 10.217.0.171:8080: connect: connection refused" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.475557 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7bbf467d99-62cpf" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.171:8080/healthcheck\": dial tcp 10.217.0.171:8080: connect: connection refused" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.495975 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_62419df2-740b-473d-8fff-9ea018a268e5/ovsdbserver-nb/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.496049 5021 generic.go:334] "Generic (PLEG): container finished" podID="62419df2-740b-473d-8fff-9ea018a268e5" containerID="11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" exitCode=143 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.496105 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerDied","Data":"11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.499081 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerID="f2a67cef1ba2db3bd18f3dfff338b23523c5e1f0086e64b2d5a4f0a2940f8a1e" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.499216 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerDied","Data":"f2a67cef1ba2db3bd18f3dfff338b23523c5e1f0086e64b2d5a4f0a2940f8a1e"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.502893 5021 generic.go:334] "Generic (PLEG): container finished" podID="06ba8703-2573-4c30-82ec-36290cf378f4" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.502964 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerDied","Data":"c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.506391 5021 generic.go:334] "Generic (PLEG): container finished" podID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerID="d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d" exitCode=143 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.506485 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerDied","Data":"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d"} Jan 21 15:48:54 crc 
kubenswrapper[5021]: I0121 15:48:54.513500 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e360fedf-3856-4f89-980d-f5f282e2f696/ovn-northd/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.513628 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e360fedf-3856-4f89-980d-f5f282e2f696","Type":"ContainerDied","Data":"c067d31a71ae2cf59c27b06623ab31d53c5bbafc2b68dca60289c51f63eb9fbd"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.513673 5021 scope.go:117] "RemoveContainer" containerID="55ece29f6857bcec4db5996a88f5ec90a9d812572bf1153d37ef4b809c300168" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.513803 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.527774 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.529012 5021 generic.go:334] "Generic (PLEG): container finished" podID="7369dbac-285b-4322-8322-41b1b450d199" containerID="437d3b10fb0fb297b844f9dbf1d4a83367b420ef57cd073914c0525d5c579f5d" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.529060 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerDied","Data":"437d3b10fb0fb297b844f9dbf1d4a83367b420ef57cd073914c0525d5c579f5d"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.531500 5021 generic.go:334] "Generic (PLEG): container finished" podID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerID="21d2a742eb18fb8403f215dcc111b5047c3d3f27852bd27e95a06b29a951b3cd" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.531548 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerDied","Data":"21d2a742eb18fb8403f215dcc111b5047c3d3f27852bd27e95a06b29a951b3cd"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.534681 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-c764x_0018f301-49d2-4884-abf4-23b4687de8fd/openstack-network-exporter/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.534754 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-c764x" event={"ID":"0018f301-49d2-4884-abf4-23b4687de8fd","Type":"ContainerDied","Data":"7541a46688dbec61388faf3a31f0fd51845574fd4739138cf0ba1e00a9847012"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.534835 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-c764x" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.536972 5021 generic.go:334] "Generic (PLEG): container finished" podID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" containerID="7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.537136 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.537406 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7e624ae4-b10e-41c8-a09d-9b81cc213cf6","Type":"ContainerDied","Data":"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.537727 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7e624ae4-b10e-41c8-a09d-9b81cc213cf6","Type":"ContainerDied","Data":"6b9587a4cdb15e5e26d32f8dc0967f54ab509fc6c672120f56325c2f4b9f598c"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.550610 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="0b98874dda34c3adb9708dfa4fddca97d42d24280001e6ca51c29fdf4e04e366" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551097 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="21f76b84c77562932f1ebb5a263ddfe5a755ae6258ad955ca59a13307d229d84" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551115 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="a89b180c0135b475ed1ba2315e698962a948ee0a359d0c123f97f5bef6cca782" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551125 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="1ca716b9f11f9eb3707f3cd9724e75ee4eb6224c4c1e84903f22f728f45b5a6e" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551133 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="6107623cd8f4072bc502c561748852925819d887d3f75272057a2e95b4ad1df7" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551142 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="26edaec702317ca592975b15ba32e49f2dbd21f92807d5d36fce7823804ed53c" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551150 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="5801c40336a7430220f5050e7b4c6fc8997538d48d91daa78538847e03eb5b9c" exitCode=0 Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551175 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"0b98874dda34c3adb9708dfa4fddca97d42d24280001e6ca51c29fdf4e04e366"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551206 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"21f76b84c77562932f1ebb5a263ddfe5a755ae6258ad955ca59a13307d229d84"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551219 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"a89b180c0135b475ed1ba2315e698962a948ee0a359d0c123f97f5bef6cca782"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551231 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"1ca716b9f11f9eb3707f3cd9724e75ee4eb6224c4c1e84903f22f728f45b5a6e"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551239 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"6107623cd8f4072bc502c561748852925819d887d3f75272057a2e95b4ad1df7"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551247 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"26edaec702317ca592975b15ba32e49f2dbd21f92807d5d36fce7823804ed53c"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.551255 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"5801c40336a7430220f5050e7b4c6fc8997538d48d91daa78538847e03eb5b9c"} Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.570492 5021 scope.go:117] "RemoveContainer" containerID="894abc06fdb5fc6222a6ec829e8805ef293fd5d9f74c877b7ed68626d7794fdd" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.672528 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config\") pod \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.672605 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5q2w\" (UniqueName: \"kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w\") pod \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.672673 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret\") pod \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.672751 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle\") pod \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\" (UID: \"deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.680226 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w" (OuterVolumeSpecName: "kube-api-access-t5q2w") pod "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" (UID: "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca"). InnerVolumeSpecName "kube-api-access-t5q2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.700253 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" (UID: "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.748634 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe" path="/var/lib/kubelet/pods/3ae2bbb9-1d77-4394-b7a9-30c9ce199ffe/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.749699 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="503da4ea-7c79-4bfe-b37b-d4db888b76f4" path="/var/lib/kubelet/pods/503da4ea-7c79-4bfe-b37b-d4db888b76f4/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.750403 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="624bb493-3f8a-4a62-993b-f66ccc317cc9" path="/var/lib/kubelet/pods/624bb493-3f8a-4a62-993b-f66ccc317cc9/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.751128 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f2d2566-dcbf-437d-bf8b-32d6a49b34aa" path="/var/lib/kubelet/pods/7f2d2566-dcbf-437d-bf8b-32d6a49b34aa/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.752337 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e" path="/var/lib/kubelet/pods/971451eb-9adb-4bfb-b3a3-9f0e31d8ff0e/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.753029 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a0ad139-d743-47ca-aecd-ee8a7ff59a7a" path="/var/lib/kubelet/pods/9a0ad139-d743-47ca-aecd-ee8a7ff59a7a/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.754061 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a580a9c4-cecd-4c8c-b928-8e8d5a686f60" path="/var/lib/kubelet/pods/a580a9c4-cecd-4c8c-b928-8e8d5a686f60/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.755589 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b88b0f54-d71c-4296-a3e8-770209fbfbc6" path="/var/lib/kubelet/pods/b88b0f54-d71c-4296-a3e8-770209fbfbc6/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.756500 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2" path="/var/lib/kubelet/pods/e9ae578b-d7f2-40b6-bdf5-fd31ed3330e2/volumes" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.758680 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" (UID: "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.777449 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5q2w\" (UniqueName: \"kubernetes.io/projected/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-kube-api-access-t5q2w\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.777636 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.778036 5021 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.783635 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" (UID: "deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.824572 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-c795c5585-m9bzp" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.172:9696/\": dial tcp 10.217.0.172:9696: connect: connection refused" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.857227 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.883279 5021 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.888933 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.899072 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.943891 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.945200 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_62419df2-740b-473d-8fff-9ea018a268e5/ovsdbserver-nb/0.log" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.945279 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.952718 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.967597 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.980731 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984437 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984482 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984515 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nxx4\" (UniqueName: \"kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984546 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984589 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8pcv\" (UniqueName: \"kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984612 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984658 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984694 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984736 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle\") pod 
\"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984770 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984806 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984855 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984896 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984955 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.984984 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpf76\" (UniqueName: \"kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.985030 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data\") pod \"7369dbac-285b-4322-8322-41b1b450d199\" (UID: \"7369dbac-285b-4322-8322-41b1b450d199\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.985078 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.985106 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb\") pod \"16b38c07-3cc7-45b6-9145-514af8206bdb\" (UID: \"16b38c07-3cc7-45b6-9145-514af8206bdb\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.985137 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs\") pod 
\"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.985208 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"62419df2-740b-473d-8fff-9ea018a268e5\" (UID: \"62419df2-740b-473d-8fff-9ea018a268e5\") " Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.992072 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.995361 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"] Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.995950 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts" (OuterVolumeSpecName: "scripts") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.997146 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:54 crc kubenswrapper[5021]: I0121 15:48:54.998218 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.005354 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.006049 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config" (OuterVolumeSpecName: "config") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.008960 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.017650 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv" (OuterVolumeSpecName: "kube-api-access-c8pcv") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "kube-api-access-c8pcv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.025111 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts" (OuterVolumeSpecName: "scripts") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.028755 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.029120 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4" (OuterVolumeSpecName: "kube-api-access-6nxx4") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "kube-api-access-6nxx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.031702 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.032170 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76" (OuterVolumeSpecName: "kube-api-access-wpf76") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). InnerVolumeSpecName "kube-api-access-wpf76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.036222 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-c764x"] Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.041090 5021 scope.go:117] "RemoveContainer" containerID="68cab115728d1c091f3a993f8259c122399204e30f7f18e3a2170fa8fd30b98f" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.057166 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089059 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089098 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089112 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089126 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nxx4\" (UniqueName: \"kubernetes.io/projected/62419df2-740b-473d-8fff-9ea018a268e5-kube-api-access-6nxx4\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089152 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8pcv\" (UniqueName: \"kubernetes.io/projected/7369dbac-285b-4322-8322-41b1b450d199-kube-api-access-c8pcv\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089164 5021 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089177 5021 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7369dbac-285b-4322-8322-41b1b450d199-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089187 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089261 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62419df2-740b-473d-8fff-9ea018a268e5-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089272 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/62419df2-740b-473d-8fff-9ea018a268e5-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.089283 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpf76\" (UniqueName: \"kubernetes.io/projected/16b38c07-3cc7-45b6-9145-514af8206bdb-kube-api-access-wpf76\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.093384 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.098702 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099244 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="probe" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099307 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="probe" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099361 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="init" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099407 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="init" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099475 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="ovn-northd" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099522 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="ovn-northd" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099579 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0018f301-49d2-4884-abf4-23b4687de8fd" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099628 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0018f301-49d2-4884-abf4-23b4687de8fd" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099677 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="cinder-scheduler" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099722 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="cinder-scheduler" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099786 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="dnsmasq-dns" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099834 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="dnsmasq-dns" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.099884 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.099950 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.100005 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100051 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.100109 5021 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="ovsdbserver-nb" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100199 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="ovsdbserver-nb" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.100256 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100307 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100521 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="ovsdbserver-nb" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100580 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" containerName="dnsmasq-dns" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100633 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="62419df2-740b-473d-8fff-9ea018a268e5" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100694 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0018f301-49d2-4884-abf4-23b4687de8fd" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100748 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="probe" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100813 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="openstack-network-exporter" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100862 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7369dbac-285b-4322-8322-41b1b450d199" containerName="cinder-scheduler" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100929 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" containerName="nova-cell1-novncproxy-novncproxy" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.100982 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" containerName="ovn-northd" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.101654 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.107300 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 21 15:48:55 crc kubenswrapper[5021]: W0121 15:48:55.117616 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52d192b4_971e_419a_8c85_cf70066656e7.slice/crio-3860ca94cea7159c12cde31be0406ea7a93d7c5b39f77c68305cbae433229292 WatchSource:0}: Error finding container 3860ca94cea7159c12cde31be0406ea7a93d7c5b39f77c68305cbae433229292: Status 404 returned error can't find the container with id 3860ca94cea7159c12cde31be0406ea7a93d7c5b39f77c68305cbae433229292 Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.122391 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "nova_cell0" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="nova_cell0" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.123168 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "nova_api" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="nova_api" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.126934 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "nova_cell1" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="nova_cell1" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.127509 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-48eb-account-create-update-scv9x" podUID="dba40158-ac19-4635-8e0d-c97ab15f65bf" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.127548 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" podUID="317e6b88-faf8-418a-8036-79ec4dacd19e" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.128197 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "barbican" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="barbican" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.130209 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" podUID="8b26760a-85cf-4259-affd-9fa52e3766fe" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.130822 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.133024 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.133111 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-10e4-account-create-update-ngtt5" podUID="edf8a635-556a-46dc-82a4-68a4d40a7381" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.135614 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config" (OuterVolumeSpecName: "config") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). InnerVolumeSpecName "config". 
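The repeated "Unhandled Error" dumps in this window all embed the same mariadb-account-create-update container command, made hard to read by the per-line journald prefixes; the capture also truncates everything between `$MYSQL_CMD <` and the trailing `logger="UnhandledError"`, so the SQL heredoc body is lost. Re-flowed below using only text actually present in the log (the nova_cell0 variant is shown; the others differ only in the templated database name, and none of these scripts ever ran, since each pod failed first with CreateContainerConfigError on its missing DatabasePassword secret):

    #!/bin/bash

    MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh

    export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}

    # Note: the host argument after -h is empty in the captured container spec.
    MYSQL_CMD="mysql -h -u root -P 3306"

    if [ -n "nova_cell0" ]; then
      GRANT_DATABASE="nova_cell0"
    else
      GRANT_DATABASE="*"
    fi

    # going for maximum compatibility here:
    # 1. MySQL 8 no longer allows implicit create user when GRANT is used
    # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
    # 3. create user with CREATE but then do all password and TLS with ALTER to
    #    support updates

    $MYSQL_CMD <  # heredoc body truncated in the captured log

A quick check for the secrets named in the CreateContainerConfigError lines, assuming kubectl access:

    kubectl -n openstack get secret nova-api-db-secret nova-cell0-db-secret nova-cell1-db-secret barbican-db-secret placement-db-secret
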
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.138645 5021 scope.go:117] "RemoveContainer" containerID="7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.138647 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: if [ -n "placement" ]; then Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="placement" Jan 21 15:48:55 crc kubenswrapper[5021]: else Jan 21 15:48:55 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:55 crc kubenswrapper[5021]: fi Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:55 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:55 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:55 crc kubenswrapper[5021]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:55 crc kubenswrapper[5021]: # support updates Jan 21 15:48:55 crc kubenswrapper[5021]: Jan 21 15:48:55 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.139167 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-mpfp7" podUID="a157b13a-50bf-4c22-805f-cd042780925c" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.142820 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-0608-account-create-update-m8cb5" podUID="52d192b4-971e-419a-8c85-cf70066656e7" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.185304 5021 scope.go:117] "RemoveContainer" containerID="7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.185727 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557\": container with ID starting with 7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557 not found: ID does not exist" containerID="7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.185753 5021 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557"} err="failed to get container status \"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557\": rpc error: code = NotFound desc = could not find container \"7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557\": container with ID starting with 7a16782bd143894b1bf2e2108f8b5bf024d72b94f1b3f7fcd30087ce2e16d557 not found: ID does not exist" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.186968 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.191310 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.191390 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shw64\" (UniqueName: \"kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.191630 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.191649 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.191661 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.198814 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.205938 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.206522 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.211047 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "16b38c07-3cc7-45b6-9145-514af8206bdb" (UID: "16b38c07-3cc7-45b6-9145-514af8206bdb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.228175 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.234949 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data" (OuterVolumeSpecName: "config-data") pod "7369dbac-285b-4322-8322-41b1b450d199" (UID: "7369dbac-285b-4322-8322-41b1b450d199"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.255260 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "62419df2-740b-473d-8fff-9ea018a268e5" (UID: "62419df2-740b-473d-8fff-9ea018a268e5"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293078 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293146 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shw64\" (UniqueName: \"kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293227 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293240 5021 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293249 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293260 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293280 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/62419df2-740b-473d-8fff-9ea018a268e5-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293295 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7369dbac-285b-4322-8322-41b1b450d199-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293306 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b38c07-3cc7-45b6-9145-514af8206bdb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.293769 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.310154 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shw64\" (UniqueName: \"kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64\") pod \"root-account-create-update-zl2nk\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " 
pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.396082 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.396165 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data podName:2dff28e1-6d0f-4a7d-8fcf-0edf26e63825 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:59.396145729 +0000 UTC m=+1480.931259618 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data") pod "rabbitmq-cell1-server-0" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825") : configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.436176 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.471159 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_70ec8329-7d58-465c-9234-7e4543fe4538/ovsdbserver-sb/0.log" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.471240 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.497983 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.504866 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.509219 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.509277 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerName="nova-cell1-conductor-conductor" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.573673 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_62419df2-740b-473d-8fff-9ea018a268e5/ovsdbserver-nb/0.log" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.574052 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"62419df2-740b-473d-8fff-9ea018a268e5","Type":"ContainerDied","Data":"9533da90d4e9d5c4f271a69e4a56dba66a3d361d1ee6caf537d47476018674c4"} 
Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.574092 5021 scope.go:117] "RemoveContainer" containerID="be5a47b27b20c5b0ab887ff6ead17e4f4a7b05b5ba87488bbbbe349813177d79" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.574192 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.578855 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-ngtt5" event={"ID":"edf8a635-556a-46dc-82a4-68a4d40a7381","Type":"ContainerStarted","Data":"8d4110520046ac794cecec1ce7cdc3fb0c5f924c9c38e91159c6f6ff53b988dc"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604002 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604061 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604159 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604242 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtjlz\" (UniqueName: \"kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604287 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604328 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604360 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.604408 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs\") pod \"70ec8329-7d58-465c-9234-7e4543fe4538\" (UID: \"70ec8329-7d58-465c-9234-7e4543fe4538\") " Jan 21 15:48:55 crc kubenswrapper[5021]: 
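The three ExecSync failures just above are the nova-cell1-conductor readiness probe ("/usr/bin/pgrep -r DRST nova-conductor") racing with container shutdown; "cannot register an exec PID: container is stopping" is expected noise while a container terminates, not a new health problem. The probe command can be run standalone inside the container; it reports ready only when a nova-conductor process exists in one of the D/R/S/T run states:

    # Same check the kubelet execs (procps-ng pgrep with run-state filtering):
    /usr/bin/pgrep -r DRST nova-conductor >/dev/null && echo ready || echo not-ready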
I0121 15:48:55.606200 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-mpfp7" event={"ID":"a157b13a-50bf-4c22-805f-cd042780925c","Type":"ContainerStarted","Data":"c1d3a031f7c2b8db4994ba4b9675cd7526402e91de2f5574c6ebb5cf89f77015"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.607698 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.608155 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts" (OuterVolumeSpecName: "scripts") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.610892 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config" (OuterVolumeSpecName: "config") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.618364 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" event={"ID":"317e6b88-faf8-418a-8036-79ec4dacd19e","Type":"ContainerStarted","Data":"9db1c833d6638eeef7a2d65e86d37de67d80a6d5b91fec1dbc9de144feb3b666"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.627007 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" event={"ID":"8b26760a-85cf-4259-affd-9fa52e3766fe","Type":"ContainerStarted","Data":"79ba363f56f749b1e561187f10a6a315b6418702b88191ad8d3707413af9bcdd"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.639873 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz" (OuterVolumeSpecName: "kube-api-access-dtjlz") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "kube-api-access-dtjlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.643076 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.680849 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.687264 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" event={"ID":"16b38c07-3cc7-45b6-9145-514af8206bdb","Type":"ContainerDied","Data":"9516123ba13cef8727a8fc636d004ffa81acfc0fbf407464c74bc7dfe7471f25"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.687585 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-knczx" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.707787 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.708001 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.708057 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.708106 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtjlz\" (UniqueName: \"kubernetes.io/projected/70ec8329-7d58-465c-9234-7e4543fe4538-kube-api-access-dtjlz\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.708167 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.708273 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70ec8329-7d58-465c-9234-7e4543fe4538-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.717086 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-scv9x" event={"ID":"dba40158-ac19-4635-8e0d-c97ab15f65bf","Type":"ContainerStarted","Data":"20957605c401031c9c41315516de257224d49b2e01fab96349e127a76af39c2a"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.730681 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.750987 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.753076 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_70ec8329-7d58-465c-9234-7e4543fe4538/ovsdbserver-sb/0.log" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.753240 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"70ec8329-7d58-465c-9234-7e4543fe4538","Type":"ContainerDied","Data":"55d6661cfbcbb9b7b4db12c0dcd78baf6444f972a3a81f237f3aa89ffba15094"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.753360 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.773130 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-m8cb5" event={"ID":"52d192b4-971e-419a-8c85-cf70066656e7","Type":"ContainerStarted","Data":"3860ca94cea7159c12cde31be0406ea7a93d7c5b39f77c68305cbae433229292"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.789332 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "70ec8329-7d58-465c-9234-7e4543fe4538" (UID: "70ec8329-7d58-465c-9234-7e4543fe4538"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.809304 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7369dbac-285b-4322-8322-41b1b450d199","Type":"ContainerDied","Data":"ea879a2ff02e39787f2c34829176f75371e1c7090f282775f6dc547cc945a7b7"} Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.809402 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.821656 5021 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.821707 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.821724 5021 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/70ec8329-7d58-465c-9234-7e4543fe4538-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.824318 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.874010 5021 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-21T15:48:53Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 21 15:48:55 crc kubenswrapper[5021]: /etc/init.d/functions: line 589: 508 Alarm clock "$@" Jan 21 15:48:55 crc kubenswrapper[5021]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-xqkct" message=< Jan 21 15:48:55 crc kubenswrapper[5021]: Exiting ovn-controller (1) [FAILED] Jan 21 15:48:55 crc kubenswrapper[5021]: Killing ovn-controller (1) [ OK ] Jan 21 15:48:55 crc kubenswrapper[5021]: 2026-01-21T15:48:53Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 21 15:48:55 crc kubenswrapper[5021]: /etc/init.d/functions: line 589: 508 Alarm clock "$@" Jan 21 15:48:55 crc kubenswrapper[5021]: > Jan 21 15:48:55 crc kubenswrapper[5021]: E0121 15:48:55.874049 5021 kuberuntime_container.go:691] "PreStop hook failed" err=< Jan 21 15:48:55 crc kubenswrapper[5021]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-21T15:48:53Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Jan 21 15:48:55 crc kubenswrapper[5021]: /etc/init.d/functions: line 589: 508 Alarm clock "$@" Jan 21 15:48:55 crc kubenswrapper[5021]: > pod="openstack/ovn-controller-xqkct" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" containerID="cri-o://a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.874084 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-xqkct" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" containerID="cri-o://a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" gracePeriod=27 Jan 21 15:48:55 crc kubenswrapper[5021]: I0121 15:48:55.994634 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.005473 5021 scope.go:117] "RemoveContainer" containerID="11ad5f2262a8d76744bb2dbd3bcaec7d5afd1ded3ec49308661f05117d09f401" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.016324 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.029210 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.037067 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.047348 5021 scope.go:117] "RemoveContainer" containerID="2fc2eb7f65b6d060d0dd9c5922dc5172b7314bb18a053569c709b7586f92be06" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.056142 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.065659 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.074198 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-knczx"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.125101 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128198 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128249 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128301 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128404 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rf8ff\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128428 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128466 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd\") pod 
\"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128514 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.128532 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift\") pod \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\" (UID: \"4687452c-74ba-4f3e-ac17-1cf4c2e514d8\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.130037 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.130799 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.138411 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.138558 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff" (OuterVolumeSpecName: "kube-api-access-rf8ff") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "kube-api-access-rf8ff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.138596 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.150708 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.151165 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-central-agent" containerID="cri-o://5037559fb60350e2158a6a5f376df3b44cba445783affafde419d451bd46ca8e" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.151540 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="proxy-httpd" containerID="cri-o://9e0d0434088b24ecb2f7a6a737f0cc34be54ffb9cbf7f5f7696a923ceacf6bc2" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.151774 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="sg-core" containerID="cri-o://9e7ddce4cf8d1e90155d69318107e51e50dde636777370df6d9675e6e804665e" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.151843 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-notification-agent" containerID="cri-o://8d425e98d39b82e8ff834275afbbd09d41f458b740a780db59954fa6251d14e5" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.169490 5021 scope.go:117] "RemoveContainer" containerID="e76dec9ec11e29552fbf6fce215e84ba99770f62744ce04725833b8e21d00d65" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.200850 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.201300 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" containerName="kube-state-metrics" containerID="cri-o://81466562175ec89583498029b73cadcc8d26846a5fbb385a68626b0ba993a0c2" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.235361 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.235397 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rf8ff\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-kube-api-access-rf8ff\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.235409 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.235417 5021 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 
15:48:56.245034 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.257430 5021 scope.go:117] "RemoveContainer" containerID="00dc1a70c5582842a5b18750882608ff55e28ecac0f1421cc5d2e9d1a3cd1b00" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.357543 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.387959 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0c6d-account-create-update-glfz4"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.397113 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.166:8776/healthcheck\": dial tcp 10.217.0.166:8776: connect: connection refused" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.397480 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.399321 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.399547 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" containerName="memcached" containerID="cri-o://9d81dbb84baa56528fb5b7d80ccd46ca92f10dbc96d74d445172433eb0f4dc44" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.404892 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0c6d-account-create-update-glfz4"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.418500 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0c6d-account-create-update-7tq5h"] Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.419015 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="ovsdbserver-sb" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419032 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="ovsdbserver-sb" Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.419058 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="openstack-network-exporter" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419067 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="openstack-network-exporter" Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.419081 5021 cpu_manager.go:410] "RemoveStaleState: removing container" 
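The cinder-api readiness failure in this block ("connect: connection refused" against https://10.217.0.166:8776/healthcheck) is the flip side of the deletions in progress: the API has already closed its listener, so probes fail until the pod object disappears. The probe can be reproduced by hand; the URL is taken verbatim from the log:

    # -k because the probe endpoint serves the internal TLS certificate.
    curl -ks --max-time 5 https://10.217.0.166:8776/healthcheck \
      || echo "refused (expected while the pod shuts down)"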
podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-httpd" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419089 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-httpd" Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.419100 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-server" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419106 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-server" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419308 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-server" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419333 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="openstack-network-exporter" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419345 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" containerName="ovsdbserver-sb" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419357 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" containerName="proxy-httpd" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.419829 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.420090 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.424176 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.446343 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts\") pod \"edf8a635-556a-46dc-82a4-68a4d40a7381\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.446439 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gzpn\" (UniqueName: \"kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn\") pod \"edf8a635-556a-46dc-82a4-68a4d40a7381\" (UID: \"edf8a635-556a-46dc-82a4-68a4d40a7381\") " Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.447145 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.447166 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.447186 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.447648 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0c6d-account-create-update-7tq5h"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.448795 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "edf8a635-556a-46dc-82a4-68a4d40a7381" (UID: "edf8a635-556a-46dc-82a4-68a4d40a7381"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.464228 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn" (OuterVolumeSpecName: "kube-api-access-9gzpn") pod "edf8a635-556a-46dc-82a4-68a4d40a7381" (UID: "edf8a635-556a-46dc-82a4-68a4d40a7381"). InnerVolumeSpecName "kube-api-access-9gzpn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.471874 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gcqqr"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.480298 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-xtcpm"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.493569 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-xtcpm"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.521121 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data" (OuterVolumeSpecName: "config-data") pod "4687452c-74ba-4f3e-ac17-1cf4c2e514d8" (UID: "4687452c-74ba-4f3e-ac17-1cf4c2e514d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.533064 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gcqqr"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.548367 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6fch\" (UniqueName: \"kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.548750 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.548927 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edf8a635-556a-46dc-82a4-68a4d40a7381-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.548939 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gzpn\" (UniqueName: \"kubernetes.io/projected/edf8a635-556a-46dc-82a4-68a4d40a7381-kube-api-access-9gzpn\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.548950 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4687452c-74ba-4f3e-ac17-1cf4c2e514d8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.550443 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.550676 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-548db5cc6d-pjhdh" podUID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" containerName="keystone-api" containerID="cri-o://3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.560142 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.571121 
5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": read tcp 10.217.0.2:40352->10.217.0.162:9311: read: connection reset by peer" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.571447 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7b8886d4fd-qn9sz" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.162:9311/healthcheck\": read tcp 10.217.0.2:40344->10.217.0.162:9311: read: connection reset by peer" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.577998 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0c6d-account-create-update-7tq5h"] Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.578688 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-x6fch operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-0c6d-account-create-update-7tq5h" podUID="2479b07a-7464-41b3-9e38-8e5b2e046542" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.591381 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.651643 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6fch\" (UniqueName: \"kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.651724 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.651867 5021 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.651930 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:57.151899805 +0000 UTC m=+1478.687013694 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : configmap "openstack-scripts" not found Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.658788 5021 projected.go:194] Error preparing data for projected volume kube-api-access-x6fch for pod openstack/keystone-0c6d-account-create-update-7tq5h: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 21 15:48:56 crc kubenswrapper[5021]: E0121 15:48:56.658855 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:57.158837239 +0000 UTC m=+1478.693951128 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-x6fch" (UniqueName: "kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.752046 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0018f301-49d2-4884-abf4-23b4687de8fd" path="/var/lib/kubelet/pods/0018f301-49d2-4884-abf4-23b4687de8fd/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.753190 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16b38c07-3cc7-45b6-9145-514af8206bdb" path="/var/lib/kubelet/pods/16b38c07-3cc7-45b6-9145-514af8206bdb/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.754015 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62419df2-740b-473d-8fff-9ea018a268e5" path="/var/lib/kubelet/pods/62419df2-740b-473d-8fff-9ea018a268e5/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.755367 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70ec8329-7d58-465c-9234-7e4543fe4538" path="/var/lib/kubelet/pods/70ec8329-7d58-465c-9234-7e4543fe4538/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.755975 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7369dbac-285b-4322-8322-41b1b450d199" path="/var/lib/kubelet/pods/7369dbac-285b-4322-8322-41b1b450d199/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.757286 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="791aef8a-49df-41be-8a61-7837ae62a00a" path="/var/lib/kubelet/pods/791aef8a-49df-41be-8a61-7837ae62a00a/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.757821 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c60e68a-50d2-402d-a040-085c245b9836" path="/var/lib/kubelet/pods/7c60e68a-50d2-402d-a040-085c245b9836/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.758368 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9984d786-ae3e-4cfe-8bf6-099159dada65" path="/var/lib/kubelet/pods/9984d786-ae3e-4cfe-8bf6-099159dada65/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.758960 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca" path="/var/lib/kubelet/pods/deb21bc7-83dd-4cbd-9ea2-a2378b4e12ca/volumes" Jan 21 
15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.760055 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e360fedf-3856-4f89-980d-f5f282e2f696" path="/var/lib/kubelet/pods/e360fedf-3856-4f89-980d-f5f282e2f696/volumes" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.801497 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" containerID="cri-o://4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" gracePeriod=30 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.846019 5021 generic.go:334] "Generic (PLEG): container finished" podID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerID="34073a8e93f07e196867b52269b44be932eaae6b829c7faf37daff1fefaef5dd" exitCode=0 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.846078 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerDied","Data":"34073a8e93f07e196867b52269b44be932eaae6b829c7faf37daff1fefaef5dd"} Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.848771 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-10e4-account-create-update-ngtt5" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.848766 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-10e4-account-create-update-ngtt5" event={"ID":"edf8a635-556a-46dc-82a4-68a4d40a7381","Type":"ContainerDied","Data":"8d4110520046ac794cecec1ce7cdc3fb0c5f924c9c38e91159c6f6ff53b988dc"} Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.852180 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7bbf467d99-62cpf" event={"ID":"4687452c-74ba-4f3e-ac17-1cf4c2e514d8","Type":"ContainerDied","Data":"f7c52590fe65cfef2e3a4e669943ba59aa01519cb28c7c39ea0ef813edfa066b"} Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.852289 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7bbf467d99-62cpf" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.864816 5021 generic.go:334] "Generic (PLEG): container finished" podID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerID="6b8005ac26237642083ae21d321912d31a41bc53e8cc8714923e3a28c95e2695" exitCode=0 Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.864879 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.864865 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerDied","Data":"6b8005ac26237642083ae21d321912d31a41bc53e8cc8714923e3a28c95e2695"} Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.884999 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.920962 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.942456 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-10e4-account-create-update-ngtt5"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.953189 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.965248 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-7bbf467d99-62cpf"] Jan 21 15:48:56 crc kubenswrapper[5021]: I0121 15:48:56.979656 5021 scope.go:117] "RemoveContainer" containerID="c2577e6926c2b20d5997e13950dc6cfaeb16569002c47c4ba9f66d3aaaed7055" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.009715 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-ckt26"] Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.019361 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.028845 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-ckt26"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.109477 5021 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 21 15:48:57 crc kubenswrapper[5021]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[/bin/sh -c #!/bin/bash Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: if [ -n "" ]; then Jan 21 15:48:57 crc kubenswrapper[5021]: GRANT_DATABASE="" Jan 21 15:48:57 crc kubenswrapper[5021]: else Jan 21 15:48:57 crc kubenswrapper[5021]: GRANT_DATABASE="*" Jan 21 15:48:57 crc kubenswrapper[5021]: fi Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: # going for maximum compatibility here: Jan 21 15:48:57 crc kubenswrapper[5021]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 21 15:48:57 crc kubenswrapper[5021]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 21 15:48:57 crc kubenswrapper[5021]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 21 15:48:57 crc kubenswrapper[5021]: # support updates Jan 21 15:48:57 crc kubenswrapper[5021]: Jan 21 15:48:57 crc kubenswrapper[5021]: $MYSQL_CMD < logger="UnhandledError" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.110832 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-zl2nk" podUID="270b5844-50a8-4a16-8c12-73c4c209aab1" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.162052 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6fch\" (UniqueName: \"kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.162157 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.162286 5021 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.162335 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:58.162319436 +0000 UTC m=+1479.697433325 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : configmap "openstack-scripts" not found Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.166186 5021 projected.go:194] Error preparing data for projected volume kube-api-access-x6fch for pod openstack/keystone-0c6d-account-create-update-7tq5h: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.166253 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:48:58.16623555 +0000 UTC m=+1479.701349439 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-x6fch" (UniqueName: "kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.284075 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:37954->10.217.0.207:8775: read: connection reset by peer" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.284075 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:37938->10.217.0.207:8775: read: connection reset by peer" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.489569 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7 is running failed: container process not found" containerID="a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.490304 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7 is running failed: container process not found" containerID="a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.499478 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7 is running failed: container process not found" containerID="a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.499563 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-xqkct" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.538328 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.538951 5021 log.go:32] "ExecSync cmd from runtime service 
failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.539252 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.539283 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.549399 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.551738 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.553230 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.553266 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.883834 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.884058 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.904982 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.914550 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.914654 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:57 crc kubenswrapper[5021]: E0121 15:48:57.914663 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" containerName="nova-cell0-conductor-conductor" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.924458 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.926367 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"446aadfb-ac91-4335-9bac-4f8d7663ab6a","Type":"ContainerDied","Data":"76d465a0e5670e2e5c75f70f0e448f889c1854efb506c3af594373823736f312"} Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.926412 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76d465a0e5670e2e5c75f70f0e448f889c1854efb506c3af594373823736f312" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.928581 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953254 5021 generic.go:334] "Generic (PLEG): container finished" podID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerID="9e0d0434088b24ecb2f7a6a737f0cc34be54ffb9cbf7f5f7696a923ceacf6bc2" exitCode=0 Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953298 5021 generic.go:334] "Generic (PLEG): container finished" podID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerID="9e7ddce4cf8d1e90155d69318107e51e50dde636777370df6d9675e6e804665e" exitCode=2 Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953308 5021 generic.go:334] "Generic (PLEG): container finished" podID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerID="5037559fb60350e2158a6a5f376df3b44cba445783affafde419d451bd46ca8e" exitCode=0 Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953355 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerDied","Data":"9e0d0434088b24ecb2f7a6a737f0cc34be54ffb9cbf7f5f7696a923ceacf6bc2"} Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953388 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerDied","Data":"9e7ddce4cf8d1e90155d69318107e51e50dde636777370df6d9675e6e804665e"} Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.953401 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerDied","Data":"5037559fb60350e2158a6a5f376df3b44cba445783affafde419d451bd46ca8e"} Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.956529 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.959470 5021 generic.go:334] "Generic (PLEG): container finished" podID="110a1110-f52a-40e4-8402-166be87650a8" containerID="feae471e1c50172422c6097ccee57bce6ab91a98c54d1223f046f0f30e158360" exitCode=0 Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.959606 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerDied","Data":"feae471e1c50172422c6097ccee57bce6ab91a98c54d1223f046f0f30e158360"} Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980548 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts\") pod \"dba40158-ac19-4635-8e0d-c97ab15f65bf\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980585 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts\") pod \"a157b13a-50bf-4c22-805f-cd042780925c\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980622 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts\") pod \"317e6b88-faf8-418a-8036-79ec4dacd19e\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980752 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjxjj\" (UniqueName: \"kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj\") pod \"dba40158-ac19-4635-8e0d-c97ab15f65bf\" (UID: \"dba40158-ac19-4635-8e0d-c97ab15f65bf\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980791 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpmbl\" (UniqueName: \"kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl\") pod \"8b26760a-85cf-4259-affd-9fa52e3766fe\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980845 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts\") pod \"8b26760a-85cf-4259-affd-9fa52e3766fe\" (UID: \"8b26760a-85cf-4259-affd-9fa52e3766fe\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980925 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxtxw\" (UniqueName: \"kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw\") pod \"317e6b88-faf8-418a-8036-79ec4dacd19e\" (UID: \"317e6b88-faf8-418a-8036-79ec4dacd19e\") " Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.980988 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74m6d\" (UniqueName: \"kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d\") pod \"a157b13a-50bf-4c22-805f-cd042780925c\" (UID: \"a157b13a-50bf-4c22-805f-cd042780925c\") " Jan 21 
15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.987899 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a157b13a-50bf-4c22-805f-cd042780925c" (UID: "a157b13a-50bf-4c22-805f-cd042780925c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.987999 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dba40158-ac19-4635-8e0d-c97ab15f65bf" (UID: "dba40158-ac19-4635-8e0d-c97ab15f65bf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.988403 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "317e6b88-faf8-418a-8036-79ec4dacd19e" (UID: "317e6b88-faf8-418a-8036-79ec4dacd19e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.992698 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8b26760a-85cf-4259-affd-9fa52e3766fe" (UID: "8b26760a-85cf-4259-affd-9fa52e3766fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:57 crc kubenswrapper[5021]: I0121 15:48:57.993036 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw" (OuterVolumeSpecName: "kube-api-access-lxtxw") pod "317e6b88-faf8-418a-8036-79ec4dacd19e" (UID: "317e6b88-faf8-418a-8036-79ec4dacd19e"). InnerVolumeSpecName "kube-api-access-lxtxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.001281 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl" (OuterVolumeSpecName: "kube-api-access-fpmbl") pod "8b26760a-85cf-4259-affd-9fa52e3766fe" (UID: "8b26760a-85cf-4259-affd-9fa52e3766fe"). InnerVolumeSpecName "kube-api-access-fpmbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.003320 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj" (OuterVolumeSpecName: "kube-api-access-vjxjj") pod "dba40158-ac19-4635-8e0d-c97ab15f65bf" (UID: "dba40158-ac19-4635-8e0d-c97ab15f65bf"). InnerVolumeSpecName "kube-api-access-vjxjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.025963 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d" (OuterVolumeSpecName: "kube-api-access-74m6d") pod "a157b13a-50bf-4c22-805f-cd042780925c" (UID: "a157b13a-50bf-4c22-805f-cd042780925c"). 
InnerVolumeSpecName "kube-api-access-74m6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.026659 5021 generic.go:334] "Generic (PLEG): container finished" podID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerID="3e2ad39675705ef9c70fa28adb97c9a01666ba00b4202ad69b0fcb8f9b4aba7d" exitCode=0 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.026728 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerDied","Data":"3e2ad39675705ef9c70fa28adb97c9a01666ba00b4202ad69b0fcb8f9b4aba7d"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.028185 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.030458 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.030586 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.032019 5021 generic.go:334] "Generic (PLEG): container finished" podID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerID="c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" exitCode=0 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.032078 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a6ed9dcf-812f-4945-ac9d-43839bb27349","Type":"ContainerDied","Data":"c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.032851 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0608-account-create-update-m8cb5" event={"ID":"52d192b4-971e-419a-8c85-cf70066656e7","Type":"ContainerDied","Data":"3860ca94cea7159c12cde31be0406ea7a93d7c5b39f77c68305cbae433229292"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.032897 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0608-account-create-update-m8cb5" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.035554 5021 generic.go:334] "Generic (PLEG): container finished" podID="469c5416-c102-43c5-8801-502231a86238" containerID="ed0b25896af93d99f78d4d4db9ef15750f9683c1f7556443210e10912e9c3954" exitCode=0 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.035642 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerDied","Data":"ed0b25896af93d99f78d4d4db9ef15750f9683c1f7556443210e10912e9c3954"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.039641 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xqkct_a5d30216-0406-4ff3-a645-880381c2a661/ovn-controller/0.log" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.039692 5021 generic.go:334] "Generic (PLEG): container finished" podID="a5d30216-0406-4ff3-a645-880381c2a661" containerID="a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" exitCode=143 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.039728 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct" event={"ID":"a5d30216-0406-4ff3-a645-880381c2a661","Type":"ContainerDied","Data":"a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.041787 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7ccf7211-3a03-41f1-839a-7bda93e55d4b","Type":"ContainerDied","Data":"9b8c21990b5804486fbc2e525d78a868a72c17f1257e2d98eda9e31e3995200e"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.041883 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.042890 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-48eb-account-create-update-scv9x" event={"ID":"dba40158-ac19-4635-8e0d-c97ab15f65bf","Type":"ContainerDied","Data":"20957605c401031c9c41315516de257224d49b2e01fab96349e127a76af39c2a"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.042965 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-48eb-account-create-update-scv9x" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.059103 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" event={"ID":"317e6b88-faf8-418a-8036-79ec4dacd19e","Type":"ContainerDied","Data":"9db1c833d6638eeef7a2d65e86d37de67d80a6d5b91fec1dbc9de144feb3b666"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.059171 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1670-account-create-update-5jkx8" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.062116 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.062221 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4f73-account-create-update-c2q4j" event={"ID":"8b26760a-85cf-4259-affd-9fa52e3766fe","Type":"ContainerDied","Data":"79ba363f56f749b1e561187f10a6a315b6418702b88191ad8d3707413af9bcdd"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.066232 5021 generic.go:334] "Generic (PLEG): container finished" podID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerID="bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55" exitCode=0 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.066287 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerDied","Data":"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.066311 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ad959625-d43f-48c3-b42f-d35e63e9af44","Type":"ContainerDied","Data":"8d1dcbb53dfb4f22cdb7b139dc6aed995e911cda8c1e7170f2e3e0b46df1dea0"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.066393 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082646 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfgxq\" (UniqueName: \"kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082700 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts\") pod \"52d192b4-971e-419a-8c85-cf70066656e7\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082732 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8rs7\" (UniqueName: \"kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082774 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082816 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082839 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " Jan 21 15:48:58 
crc kubenswrapper[5021]: I0121 15:48:58.082869 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.082900 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083031 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083053 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083090 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083117 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083141 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gb6b5\" (UniqueName: \"kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083170 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083198 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083229 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run\") pod \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\" (UID: \"446aadfb-ac91-4335-9bac-4f8d7663ab6a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083267 5021 
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083266 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "52d192b4-971e-419a-8c85-cf70066656e7" (UID: "52d192b4-971e-419a-8c85-cf70066656e7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083288 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083369 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxvl9\" (UniqueName: \"kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9\") pod \"52d192b4-971e-419a-8c85-cf70066656e7\" (UID: \"52d192b4-971e-419a-8c85-cf70066656e7\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083419 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083464 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083491 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083530 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083561 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs\") pod \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\" (UID: \"7ccf7211-3a03-41f1-839a-7bda93e55d4b\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.083599 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084402 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b26760a-85cf-4259-affd-9fa52e3766fe-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084422 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxtxw\" (UniqueName: \"kubernetes.io/projected/317e6b88-faf8-418a-8036-79ec4dacd19e-kube-api-access-lxtxw\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084435 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74m6d\" (UniqueName: \"kubernetes.io/projected/a157b13a-50bf-4c22-805f-cd042780925c-kube-api-access-74m6d\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084453 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52d192b4-971e-419a-8c85-cf70066656e7-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084468 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba40158-ac19-4635-8e0d-c97ab15f65bf-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084478 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a157b13a-50bf-4c22-805f-cd042780925c-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084486 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/317e6b88-faf8-418a-8036-79ec4dacd19e-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084495 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjxjj\" (UniqueName: \"kubernetes.io/projected/dba40158-ac19-4635-8e0d-c97ab15f65bf-kube-api-access-vjxjj\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.084503 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpmbl\" (UniqueName: \"kubernetes.io/projected/8b26760a-85cf-4259-affd-9fa52e3766fe-kube-api-access-fpmbl\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.085061 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs" (OuterVolumeSpecName: "logs") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.085300 5021 generic.go:334] "Generic (PLEG): container finished" podID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" containerID="9d81dbb84baa56528fb5b7d80ccd46ca92f10dbc96d74d445172433eb0f4dc44" exitCode=0
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.085397 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb584a2d-b396-4850-a7b5-3d827c42fe5a","Type":"ContainerDied","Data":"9d81dbb84baa56528fb5b7d80ccd46ca92f10dbc96d74d445172433eb0f4dc44"}
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.085479 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.085651 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs" (OuterVolumeSpecName: "logs") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.089501 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.089901 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs" (OuterVolumeSpecName: "logs") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.090338 5021 generic.go:334] "Generic (PLEG): container finished" podID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" containerID="81466562175ec89583498029b73cadcc8d26846a5fbb385a68626b0ba993a0c2" exitCode=2
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.090460 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5073fbf8-f2ef-49e7-8b07-d90b1822b414","Type":"ContainerDied","Data":"81466562175ec89583498029b73cadcc8d26846a5fbb385a68626b0ba993a0c2"}
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.101382 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts" (OuterVolumeSpecName: "scripts") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.102590 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-mpfp7"
Need to start a new one" pod="openstack/root-account-create-update-mpfp7" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.102604 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-mpfp7" event={"ID":"a157b13a-50bf-4c22-805f-cd042780925c","Type":"ContainerDied","Data":"c1d3a031f7c2b8db4994ba4b9675cd7526402e91de2f5574c6ebb5cf89f77015"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.109364 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.109630 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zl2nk" event={"ID":"270b5844-50a8-4a16-8c12-73c4c209aab1","Type":"ContainerStarted","Data":"2acf30296f0d75a56333cf99c42a3076576409f94c0d544d90e6dffc26248184"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.110179 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts" (OuterVolumeSpecName: "scripts") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.113529 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.113582 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7" (OuterVolumeSpecName: "kube-api-access-v8rs7") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "kube-api-access-v8rs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.113689 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9" (OuterVolumeSpecName: "kube-api-access-cxvl9") pod "52d192b4-971e-419a-8c85-cf70066656e7" (UID: "52d192b4-971e-419a-8c85-cf70066656e7"). InnerVolumeSpecName "kube-api-access-cxvl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.113815 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq" (OuterVolumeSpecName: "kube-api-access-bfgxq") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "kube-api-access-bfgxq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.116544 5021 generic.go:334] "Generic (PLEG): container finished" podID="0415e622-e0cf-4097-865a-a0970f2acc07" containerID="20a7c05d680426c518dcf812d8d7a9481aa09f5c574f8ae028a68e0fbb6c1a5e" exitCode=0 Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.116623 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.117292 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerDied","Data":"20a7c05d680426c518dcf812d8d7a9481aa09f5c574f8ae028a68e0fbb6c1a5e"} Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.118156 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5" (OuterVolumeSpecName: "kube-api-access-gb6b5") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "kube-api-access-gb6b5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.124068 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.139131 5021 scope.go:117] "RemoveContainer" containerID="0771568118c8d7a6aa5ededbd663532cb393bc36afe29638946df189c2108dfc" Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.140508 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.146936 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.157035 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.157131 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerName="nova-scheduler-scheduler" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.171637 5021 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188279 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6fch\" (UniqueName: \"kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188368 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts\") pod \"keystone-0c6d-account-create-update-7tq5h\" (UID: \"2479b07a-7464-41b3-9e38-8e5b2e046542\") " pod="openstack/keystone-0c6d-account-create-update-7tq5h" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188466 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188479 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad959625-d43f-48c3-b42f-d35e63e9af44-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188492 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfgxq\" (UniqueName: \"kubernetes.io/projected/7ccf7211-3a03-41f1-839a-7bda93e55d4b-kube-api-access-bfgxq\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188506 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8rs7\" (UniqueName: \"kubernetes.io/projected/446aadfb-ac91-4335-9bac-4f8d7663ab6a-kube-api-access-v8rs7\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188520 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188531 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188541 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188551 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188562 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188573 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gb6b5\" (UniqueName: \"kubernetes.io/projected/ad959625-d43f-48c3-b42f-d35e63e9af44-kube-api-access-gb6b5\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188596 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188608 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/446aadfb-ac91-4335-9bac-4f8d7663ab6a-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188622 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxvl9\" (UniqueName: \"kubernetes.io/projected/52d192b4-971e-419a-8c85-cf70066656e7-kube-api-access-cxvl9\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188632 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ccf7211-3a03-41f1-839a-7bda93e55d4b-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.188643 5021 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7ccf7211-3a03-41f1-839a-7bda93e55d4b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.189334 5021 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.189469 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:49:00.189448495 +0000 UTC m=+1481.724562464 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : configmap "openstack-scripts" not found Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.190610 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.190744 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data podName:b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b nodeName:}" failed. No retries permitted until 2026-01-21 15:49:06.190731879 +0000 UTC m=+1487.725845848 (durationBeforeRetry 8s). 
Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.192994 5021 projected.go:194] Error preparing data for projected volume kube-api-access-x6fch for pod openstack/keystone-0c6d-account-create-update-7tq5h: failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 21 15:48:58 crc kubenswrapper[5021]: E0121 15:48:58.193085 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch podName:2479b07a-7464-41b3-9e38-8e5b2e046542 nodeName:}" failed. No retries permitted until 2026-01-21 15:49:00.193063241 +0000 UTC m=+1481.728177220 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-x6fch" (UniqueName: "kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch") pod "keystone-0c6d-account-create-update-7tq5h" (UID: "2479b07a-7464-41b3-9e38-8e5b2e046542") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.198119 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.216552 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-48eb-account-create-update-scv9x"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.225319 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.255027 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.264503 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-4f73-account-create-update-c2q4j"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.273533 5021 scope.go:117] "RemoveContainer" containerID="437d3b10fb0fb297b844f9dbf1d4a83367b420ef57cd073914c0525d5c579f5d"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.286974 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.290414 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.297145 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7b8886d4fd-qn9sz"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.301405 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1670-account-create-update-5jkx8"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.318093 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xqkct_a5d30216-0406-4ff3-a645-880381c2a661/ovn-controller/0.log"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.318169 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqkct"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.334838 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0c6d-account-create-update-7tq5h"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.348084 5021 scope.go:117] "RemoveContainer" containerID="f5e3f741e90cddcaa56c488a9ce56cfd5d36717ab8f8f3d4ee72791c52b6336c"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.348948 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0c6d-account-create-update-7tq5h"]
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.363272 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.366626 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.375065 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.379102 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data" (OuterVolumeSpecName: "config-data") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.379938 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data" (OuterVolumeSpecName: "config-data") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
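Note: the rabbitmq-server-0 readiness failure above behaves like a plain TCP check, which is what the recorded output suggests: the probe dials 10.217.0.101:5671 and reports the dial error verbatim. A minimal stdlib sketch of the same check (address taken from the log entry; the timeout is an assumption):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        conn, err := net.DialTimeout("tcp", "10.217.0.101:5671", time.Second)
        if err != nil {
            // e.g. "dial tcp 10.217.0.101:5671: connect: connection refused"
            fmt.Println("probe failure:", err)
            return
        }
        conn.Close()
        fmt.Println("probe success")
    }
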
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391164 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391248 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzlln\" (UniqueName: \"kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391283 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391369 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391415 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391501 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391600 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391639 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391666 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391689 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br6k2\" (UniqueName: \"kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: 
\"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.391866 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.392197 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.392451 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs\") pod \"a5d30216-0406-4ff3-a645-880381c2a661\" (UID: \"a5d30216-0406-4ff3-a645-880381c2a661\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.392472 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom\") pod \"fad66107-0589-4ed8-94dc-fd29f2f58c43\" (UID: \"fad66107-0589-4ed8-94dc-fd29f2f58c43\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.393979 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.394682 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.394703 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2479b07a-7464-41b3-9e38-8e5b2e046542-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.394713 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6fch\" (UniqueName: \"kubernetes.io/projected/2479b07a-7464-41b3-9e38-8e5b2e046542-kube-api-access-x6fch\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.394721 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.396284 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.397231 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run" (OuterVolumeSpecName: "var-run") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.410412 5021 scope.go:117] "RemoveContainer" containerID="21d2a742eb18fb8403f215dcc111b5047c3d3f27852bd27e95a06b29a951b3cd" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.411155 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.411515 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.411553 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.412395 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.414771 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln" (OuterVolumeSpecName: "kube-api-access-gzlln") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "kube-api-access-gzlln". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.414820 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2" (OuterVolumeSpecName: "kube-api-access-br6k2") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "kube-api-access-br6k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.416037 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs" (OuterVolumeSpecName: "logs") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.416442 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts" (OuterVolumeSpecName: "scripts") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.419138 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.419246 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7ccf7211-3a03-41f1-839a-7bda93e55d4b" (UID: "7ccf7211-3a03-41f1-839a-7bda93e55d4b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.429712 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-mpfp7"] Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.442936 5021 scope.go:117] "RemoveContainer" containerID="16670df6a896fe5b4ccec437b88876c2007832ffa1ddab3c5a874249577f502d" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.464752 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.469997 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.496365 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.496877 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle\") pod \"a6ed9dcf-812f-4945-ac9d-43839bb27349\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.497086 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data\") pod \"a6ed9dcf-812f-4945-ac9d-43839bb27349\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.497302 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.497412 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.499149 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.499286 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.501081 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.502968 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.503453 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs" (OuterVolumeSpecName: "logs") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.504146 5021 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.504645 5021 scope.go:117] "RemoveContainer" containerID="34073a8e93f07e196867b52269b44be932eaae6b829c7faf37daff1fefaef5dd"
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505167 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505255 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x77w7\" (UniqueName: \"kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7\") pod \"a6ed9dcf-812f-4945-ac9d-43839bb27349\" (UID: \"a6ed9dcf-812f-4945-ac9d-43839bb27349\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505296 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505354 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") pod \"ad959625-d43f-48c3-b42f-d35e63e9af44\" (UID: \"ad959625-d43f-48c3-b42f-d35e63e9af44\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505416 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgmwm\" (UniqueName: \"kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm\") pod \"0415e622-e0cf-4097-865a-a0970f2acc07\" (UID: \"0415e622-e0cf-4097-865a-a0970f2acc07\") "
Jan 21 15:48:58 crc kubenswrapper[5021]: W0121 15:48:58.505570 5021 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/ad959625-d43f-48c3-b42f-d35e63e9af44/volumes/kubernetes.io~secret/public-tls-certs
Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.505583 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ad959625-d43f-48c3-b42f-d35e63e9af44" (UID: "ad959625-d43f-48c3-b42f-d35e63e9af44"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.506427 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508541 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508576 5021 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508588 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0415e622-e0cf-4097-865a-a0970f2acc07-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508603 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508617 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzlln\" (UniqueName: \"kubernetes.io/projected/a5d30216-0406-4ff3-a645-880381c2a661-kube-api-access-gzlln\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508629 5021 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508641 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508653 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508664 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad959625-d43f-48c3-b42f-d35e63e9af44-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508675 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad66107-0589-4ed8-94dc-fd29f2f58c43-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508685 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d30216-0406-4ff3-a645-880381c2a661-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508696 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br6k2\" (UniqueName: \"kubernetes.io/projected/fad66107-0589-4ed8-94dc-fd29f2f58c43-kube-api-access-br6k2\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508706 5021 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" 
(UniqueName: \"kubernetes.io/host-path/a5d30216-0406-4ff3-a645-880381c2a661-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.508718 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ccf7211-3a03-41f1-839a-7bda93e55d4b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.520900 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts" (OuterVolumeSpecName: "scripts") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.523437 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm" (OuterVolumeSpecName: "kube-api-access-wgmwm") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "kube-api-access-wgmwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.524760 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data" (OuterVolumeSpecName: "config-data") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.535754 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-0608-account-create-update-m8cb5"] Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.543048 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7" (OuterVolumeSpecName: "kube-api-access-x77w7") pod "a6ed9dcf-812f-4945-ac9d-43839bb27349" (UID: "a6ed9dcf-812f-4945-ac9d-43839bb27349"). InnerVolumeSpecName "kube-api-access-x77w7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.548474 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.558465 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.570271 5021 scope.go:117] "RemoveContainer" containerID="acb12fa4b5f061852748af753502ce94371a4a867002ad11d238b65b996be3e7" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.596417 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "446aadfb-ac91-4335-9bac-4f8d7663ab6a" (UID: "446aadfb-ac91-4335-9bac-4f8d7663ab6a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.602214 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609509 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs\") pod \"110a1110-f52a-40e4-8402-166be87650a8\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609673 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs\") pod \"110a1110-f52a-40e4-8402-166be87650a8\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609764 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config\") pod \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609809 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs\") pod \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609851 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data\") pod \"110a1110-f52a-40e4-8402-166be87650a8\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.609894 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data\") pod \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610030 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs784\" (UniqueName: \"kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784\") pod \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610069 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle\") pod \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\" (UID: \"fb584a2d-b396-4850-a7b5-3d827c42fe5a\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610097 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctsq9\" (UniqueName: 
\"kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9\") pod \"110a1110-f52a-40e4-8402-166be87650a8\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610127 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle\") pod \"110a1110-f52a-40e4-8402-166be87650a8\" (UID: \"110a1110-f52a-40e4-8402-166be87650a8\") " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610769 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610801 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610818 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610831 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x77w7\" (UniqueName: \"kubernetes.io/projected/a6ed9dcf-812f-4945-ac9d-43839bb27349-kube-api-access-x77w7\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610844 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610858 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgmwm\" (UniqueName: \"kubernetes.io/projected/0415e622-e0cf-4097-865a-a0970f2acc07-kube-api-access-wgmwm\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.610870 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/446aadfb-ac91-4335-9bac-4f8d7663ab6a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.612629 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data" (OuterVolumeSpecName: "config-data") pod "fb584a2d-b396-4850-a7b5-3d827c42fe5a" (UID: "fb584a2d-b396-4850-a7b5-3d827c42fe5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.612750 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs" (OuterVolumeSpecName: "logs") pod "110a1110-f52a-40e4-8402-166be87650a8" (UID: "110a1110-f52a-40e4-8402-166be87650a8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.613768 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "fb584a2d-b396-4850-a7b5-3d827c42fe5a" (UID: "fb584a2d-b396-4850-a7b5-3d827c42fe5a"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.630130 5021 scope.go:117] "RemoveContainer" containerID="bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.638424 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.640247 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9" (OuterVolumeSpecName: "kube-api-access-ctsq9") pod "110a1110-f52a-40e4-8402-166be87650a8" (UID: "110a1110-f52a-40e4-8402-166be87650a8"). InnerVolumeSpecName "kube-api-access-ctsq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.640815 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784" (OuterVolumeSpecName: "kube-api-access-xs784") pod "fb584a2d-b396-4850-a7b5-3d827c42fe5a" (UID: "fb584a2d-b396-4850-a7b5-3d827c42fe5a"). InnerVolumeSpecName "kube-api-access-xs784". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.654075 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data" (OuterVolumeSpecName: "config-data") pod "a6ed9dcf-812f-4945-ac9d-43839bb27349" (UID: "a6ed9dcf-812f-4945-ac9d-43839bb27349"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.698518 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.708961 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713264 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713299 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713311 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/110a1110-f52a-40e4-8402-166be87650a8-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713348 5021 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713360 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb584a2d-b396-4850-a7b5-3d827c42fe5a-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713371 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713429 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713446 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs784\" (UniqueName: \"kubernetes.io/projected/fb584a2d-b396-4850-a7b5-3d827c42fe5a-kube-api-access-xs784\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.713460 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctsq9\" (UniqueName: \"kubernetes.io/projected/110a1110-f52a-40e4-8402-166be87650a8-kube-api-access-ctsq9\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.718885 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.740944 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6ed9dcf-812f-4945-ac9d-43839bb27349" (UID: "a6ed9dcf-812f-4945-ac9d-43839bb27349"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.764121 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.774629 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2479b07a-7464-41b3-9e38-8e5b2e046542" path="/var/lib/kubelet/pods/2479b07a-7464-41b3-9e38-8e5b2e046542/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.775158 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="317e6b88-faf8-418a-8036-79ec4dacd19e" path="/var/lib/kubelet/pods/317e6b88-faf8-418a-8036-79ec4dacd19e/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.775688 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4687452c-74ba-4f3e-ac17-1cf4c2e514d8" path="/var/lib/kubelet/pods/4687452c-74ba-4f3e-ac17-1cf4c2e514d8/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.794481 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d192b4-971e-419a-8c85-cf70066656e7" path="/var/lib/kubelet/pods/52d192b4-971e-419a-8c85-cf70066656e7/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.795087 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b26760a-85cf-4259-affd-9fa52e3766fe" path="/var/lib/kubelet/pods/8b26760a-85cf-4259-affd-9fa52e3766fe/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.795562 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="966fe4ae-3218-4fe1-ac33-d3731130f13a" path="/var/lib/kubelet/pods/966fe4ae-3218-4fe1-ac33-d3731130f13a/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.796454 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a157b13a-50bf-4c22-805f-cd042780925c" path="/var/lib/kubelet/pods/a157b13a-50bf-4c22-805f-cd042780925c/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.796896 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dba40158-ac19-4635-8e0d-c97ab15f65bf" path="/var/lib/kubelet/pods/dba40158-ac19-4635-8e0d-c97ab15f65bf/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.798552 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edf8a635-556a-46dc-82a4-68a4d40a7381" path="/var/lib/kubelet/pods/edf8a635-556a-46dc-82a4-68a4d40a7381/volumes" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.801559 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "110a1110-f52a-40e4-8402-166be87650a8" (UID: "110a1110-f52a-40e4-8402-166be87650a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.815116 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.815194 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.815211 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.815222 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6ed9dcf-812f-4945-ac9d-43839bb27349-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.847835 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb584a2d-b396-4850-a7b5-3d827c42fe5a" (UID: "fb584a2d-b396-4850-a7b5-3d827c42fe5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.863469 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.866380 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data" (OuterVolumeSpecName: "config-data") pod "0415e622-e0cf-4097-865a-a0970f2acc07" (UID: "0415e622-e0cf-4097-865a-a0970f2acc07"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.907662 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data" (OuterVolumeSpecName: "config-data") pod "fad66107-0589-4ed8-94dc-fd29f2f58c43" (UID: "fad66107-0589-4ed8-94dc-fd29f2f58c43"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.916668 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.916705 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.916720 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0415e622-e0cf-4097-865a-a0970f2acc07-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.916733 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad66107-0589-4ed8-94dc-fd29f2f58c43-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.917845 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data" (OuterVolumeSpecName: "config-data") pod "110a1110-f52a-40e4-8402-166be87650a8" (UID: "110a1110-f52a-40e4-8402-166be87650a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.937090 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "a5d30216-0406-4ff3-a645-880381c2a661" (UID: "a5d30216-0406-4ff3-a645-880381c2a661"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.963226 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "110a1110-f52a-40e4-8402-166be87650a8" (UID: "110a1110-f52a-40e4-8402-166be87650a8"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:58 crc kubenswrapper[5021]: I0121 15:48:58.963827 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "fb584a2d-b396-4850-a7b5-3d827c42fe5a" (UID: "fb584a2d-b396-4850-a7b5-3d827c42fe5a"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.019425 5021 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb584a2d-b396-4850-a7b5-3d827c42fe5a-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.019470 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.019490 5021 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5d30216-0406-4ff3-a645-880381c2a661-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.019504 5021 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/110a1110-f52a-40e4-8402-166be87650a8-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.039847 5021 scope.go:117] "RemoveContainer" containerID="d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.060541 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.073889 5021 scope.go:117] "RemoveContainer" containerID="bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55" Jan 21 15:48:59 crc kubenswrapper[5021]: E0121 15:48:59.074641 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55\": container with ID starting with bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55 not found: ID does not exist" containerID="bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.074679 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55"} err="failed to get container status \"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55\": rpc error: code = NotFound desc = could not find container \"bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55\": container with ID starting with bfdbb4c8a7a050190ec619b218dfb9517faf71368ffc310feb532deef033dc55 not found: ID does not exist" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.074698 5021 scope.go:117] "RemoveContainer" containerID="d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.075317 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: E0121 15:48:59.075377 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d\": container with ID starting with d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d not found: ID does not exist" containerID="d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d" Jan 
21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.075393 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d"} err="failed to get container status \"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d\": rpc error: code = NotFound desc = could not find container \"d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d\": container with ID starting with d3222a69d9fcc61f98b79a68e5ce69836f0becb1b379e4dc1551706c9de15b9d not found: ID does not exist" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.077362 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.082973 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.096771 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.115878 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120099 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120146 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120170 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120231 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjf57\" (UniqueName: \"kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120281 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120325 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.120483 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle\") pod \"469c5416-c102-43c5-8801-502231a86238\" (UID: \"469c5416-c102-43c5-8801-502231a86238\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.121472 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs" (OuterVolumeSpecName: "logs") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.123300 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.124277 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57" (OuterVolumeSpecName: "kube-api-access-vjf57") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "kube-api-access-vjf57". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.124238 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts" (OuterVolumeSpecName: "scripts") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.154821 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xqkct_a5d30216-0406-4ff3-a645-880381c2a661/ovn-controller/0.log" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.154889 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqkct" event={"ID":"a5d30216-0406-4ff3-a645-880381c2a661","Type":"ContainerDied","Data":"dcb097e7c3dec28fdfc1ca170003124e5cffa2856cb794d32f925ff59d6bbdb9"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.154959 5021 scope.go:117] "RemoveContainer" containerID="a650fe4d3e5d417b81ce5f0db674f5341402d7b7ced37533a3ae41abbd2df4b7" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.155151 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqkct" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.166955 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zl2nk" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.166954 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zl2nk" event={"ID":"270b5844-50a8-4a16-8c12-73c4c209aab1","Type":"ContainerDied","Data":"2acf30296f0d75a56333cf99c42a3076576409f94c0d544d90e6dffc26248184"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.175228 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-57f8ddbc76-dgfjh" event={"ID":"469c5416-c102-43c5-8801-502231a86238","Type":"ContainerDied","Data":"3bcbb563925e2a4544eefd2b4d1e327be52343aa34777acfdbac76cacde6fc5f"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.175324 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-57f8ddbc76-dgfjh" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.183989 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.184034 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"110a1110-f52a-40e4-8402-166be87650a8","Type":"ContainerDied","Data":"65731795182e43d75169509b5c1183611d7f9663ce1fd993666fb3255554e661"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.184567 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data" (OuterVolumeSpecName: "config-data") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.190572 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.191124 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5073fbf8-f2ef-49e7-8b07-d90b1822b414","Type":"ContainerDied","Data":"c3611b4d67192fc20c2bc7d87bf6b3c5c011452c57850853d104e33d6f79dcea"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.196661 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0415e622-e0cf-4097-865a-a0970f2acc07","Type":"ContainerDied","Data":"549188ecab545a9dba7f8ce68626136542e660601cf4fc70bf56379729785aaa"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.196725 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.200048 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.200207 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b8886d4fd-qn9sz" event={"ID":"fad66107-0589-4ed8-94dc-fd29f2f58c43","Type":"ContainerDied","Data":"9d82cfb2a45dcf44725b742ad79254d95a917c7a0a786389a2eb05b04d7693e8"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.200229 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7b8886d4fd-qn9sz" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.209879 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb584a2d-b396-4850-a7b5-3d827c42fe5a","Type":"ContainerDied","Data":"34022b3320dcca12867c09d91ae9375c4ffad8abba87f069053418cbcdc45acc"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.210015 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.213271 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a6ed9dcf-812f-4945-ac9d-43839bb27349","Type":"ContainerDied","Data":"849bfb0f3b8bb3a9d6d261a1faf0901d702687f18874bb4e8c9d0a7d202321d1"} Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.213343 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.218088 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.221837 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.226606 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts\") pod \"270b5844-50a8-4a16-8c12-73c4c209aab1\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.226963 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shw64\" (UniqueName: \"kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64\") pod \"270b5844-50a8-4a16-8c12-73c4c209aab1\" (UID: \"270b5844-50a8-4a16-8c12-73c4c209aab1\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.227026 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config\") pod \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.227057 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle\") pod \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.227135 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs\") pod \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.227226 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkzv5\" (UniqueName: \"kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5\") pod \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\" (UID: \"5073fbf8-f2ef-49e7-8b07-d90b1822b414\") " Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.227246 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "270b5844-50a8-4a16-8c12-73c4c209aab1" (UID: "270b5844-50a8-4a16-8c12-73c4c209aab1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228738 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228763 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228776 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjf57\" (UniqueName: \"kubernetes.io/projected/469c5416-c102-43c5-8801-502231a86238-kube-api-access-vjf57\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228789 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/469c5416-c102-43c5-8801-502231a86238-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228802 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/270b5844-50a8-4a16-8c12-73c4c209aab1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.228813 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.232556 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64" (OuterVolumeSpecName: "kube-api-access-shw64") pod "270b5844-50a8-4a16-8c12-73c4c209aab1" (UID: "270b5844-50a8-4a16-8c12-73c4c209aab1"). InnerVolumeSpecName "kube-api-access-shw64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.232608 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5" (OuterVolumeSpecName: "kube-api-access-wkzv5") pod "5073fbf8-f2ef-49e7-8b07-d90b1822b414" (UID: "5073fbf8-f2ef-49e7-8b07-d90b1822b414"). InnerVolumeSpecName "kube-api-access-wkzv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.235702 5021 scope.go:117] "RemoveContainer" containerID="ed0b25896af93d99f78d4d4db9ef15750f9683c1f7556443210e10912e9c3954" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.237184 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xqkct"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.250535 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.259887 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.263972 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.274856 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.278052 5021 scope.go:117] "RemoveContainer" containerID="568335520c5abb99b1d9dd2a7aa68f565adae7b72f51cf91144f9ac64fbbdece" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.286274 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.290158 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "469c5416-c102-43c5-8801-502231a86238" (UID: "469c5416-c102-43c5-8801-502231a86238"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.294187 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "5073fbf8-f2ef-49e7-8b07-d90b1822b414" (UID: "5073fbf8-f2ef-49e7-8b07-d90b1822b414"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.295997 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.304273 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.304535 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5073fbf8-f2ef-49e7-8b07-d90b1822b414" (UID: "5073fbf8-f2ef-49e7-8b07-d90b1822b414"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.307304 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "5073fbf8-f2ef-49e7-8b07-d90b1822b414" (UID: "5073fbf8-f2ef-49e7-8b07-d90b1822b414"). InnerVolumeSpecName "kube-state-metrics-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.311152 5021 scope.go:117] "RemoveContainer" containerID="feae471e1c50172422c6097ccee57bce6ab91a98c54d1223f046f0f30e158360" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.311361 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.318017 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.325361 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333034 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkzv5\" (UniqueName: \"kubernetes.io/projected/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-api-access-wkzv5\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333337 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333351 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shw64\" (UniqueName: \"kubernetes.io/projected/270b5844-50a8-4a16-8c12-73c4c209aab1-kube-api-access-shw64\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333364 5021 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333380 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333395 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/469c5416-c102-43c5-8801-502231a86238-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333407 5021 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5073fbf8-f2ef-49e7-8b07-d90b1822b414-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.333111 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7b8886d4fd-qn9sz"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.341413 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.343241 5021 scope.go:117] "RemoveContainer" containerID="f0fe818e9e2a058656b7e5d772bd6c84da9de9b5ca099a5c958f3ab93f7c5392" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.375997 5021 scope.go:117] "RemoveContainer" containerID="81466562175ec89583498029b73cadcc8d26846a5fbb385a68626b0ba993a0c2" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.376286 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 21 
15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.400872 5021 scope.go:117] "RemoveContainer" containerID="20a7c05d680426c518dcf812d8d7a9481aa09f5c574f8ae028a68e0fbb6c1a5e" Jan 21 15:48:59 crc kubenswrapper[5021]: E0121 15:48:59.434782 5021 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:59 crc kubenswrapper[5021]: E0121 15:48:59.434865 5021 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data podName:2dff28e1-6d0f-4a7d-8fcf-0edf26e63825 nodeName:}" failed. No retries permitted until 2026-01-21 15:49:07.434845556 +0000 UTC m=+1488.969959445 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data") pod "rabbitmq-cell1-server-0" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825") : configmap "rabbitmq-cell1-config-data" not found Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.442481 5021 scope.go:117] "RemoveContainer" containerID="d29d9648241688d22882bb1ef26e5b75f7e75ce105b478819e5cb0b36d9eaa34" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.493088 5021 scope.go:117] "RemoveContainer" containerID="3e2ad39675705ef9c70fa28adb97c9a01666ba00b4202ad69b0fcb8f9b4aba7d" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.533851 5021 scope.go:117] "RemoveContainer" containerID="b0be39c8ae52be02d5990fbd6de2c149adc18e9a2711ad760bb76af11a809a0e" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.556376 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.565341 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zl2nk"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.576409 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.578013 5021 scope.go:117] "RemoveContainer" containerID="9d81dbb84baa56528fb5b7d80ccd46ca92f10dbc96d74d445172433eb0f4dc44" Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.588281 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-57f8ddbc76-dgfjh"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.596759 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.605818 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 21 15:48:59 crc kubenswrapper[5021]: I0121 15:48:59.740120 5021 scope.go:117] "RemoveContainer" containerID="c6c487064a50a13818b80449ec45c315dcbd4c10322323fbef91c6b9e74ca755" Jan 21 15:49:00 crc kubenswrapper[5021]: E0121 15:49:00.117805 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 21 15:49:00 crc kubenswrapper[5021]: E0121 15:49:00.119172 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code 
-1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 21 15:49:00 crc kubenswrapper[5021]: E0121 15:49:00.120879 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 21 15:49:00 crc kubenswrapper[5021]: E0121 15:49:00.120964 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.228317 5021 generic.go:334] "Generic (PLEG): container finished" podID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerID="44ab8303cf36ed1256a72700def0f8fdb1a1e4a5f2dd2a14ca80a744759920ec" exitCode=0 Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.228397 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerDied","Data":"44ab8303cf36ed1256a72700def0f8fdb1a1e4a5f2dd2a14ca80a744759920ec"} Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.230826 5021 generic.go:334] "Generic (PLEG): container finished" podID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerID="d55fd3560f293f7b3d5438cf1c04fd0d68375dd2c61252e90dab6e4eb53445b2" exitCode=0 Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.230885 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerDied","Data":"d55fd3560f293f7b3d5438cf1c04fd0d68375dd2c61252e90dab6e4eb53445b2"} Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.236354 5021 generic.go:334] "Generic (PLEG): container finished" podID="1093d499-bd73-4de4-b999-a7e9835b3124" containerID="de590a6c44256e84fcc664627be518c9fb2c460d8c59cdb3123cc99eebe47520" exitCode=0 Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.236404 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerDied","Data":"de590a6c44256e84fcc664627be518c9fb2c460d8c59cdb3123cc99eebe47520"} Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.759543 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" path="/var/lib/kubelet/pods/0415e622-e0cf-4097-865a-a0970f2acc07/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.760451 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="110a1110-f52a-40e4-8402-166be87650a8" path="/var/lib/kubelet/pods/110a1110-f52a-40e4-8402-166be87650a8/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.761148 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="270b5844-50a8-4a16-8c12-73c4c209aab1" path="/var/lib/kubelet/pods/270b5844-50a8-4a16-8c12-73c4c209aab1/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.762029 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" 
path="/var/lib/kubelet/pods/446aadfb-ac91-4335-9bac-4f8d7663ab6a/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.762625 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469c5416-c102-43c5-8801-502231a86238" path="/var/lib/kubelet/pods/469c5416-c102-43c5-8801-502231a86238/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.763284 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" path="/var/lib/kubelet/pods/5073fbf8-f2ef-49e7-8b07-d90b1822b414/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.764222 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" path="/var/lib/kubelet/pods/7ccf7211-3a03-41f1-839a-7bda93e55d4b/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.764941 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5d30216-0406-4ff3-a645-880381c2a661" path="/var/lib/kubelet/pods/a5d30216-0406-4ff3-a645-880381c2a661/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.765459 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" path="/var/lib/kubelet/pods/a6ed9dcf-812f-4945-ac9d-43839bb27349/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.766500 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" path="/var/lib/kubelet/pods/ad959625-d43f-48c3-b42f-d35e63e9af44/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.767081 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" path="/var/lib/kubelet/pods/fad66107-0589-4ed8-94dc-fd29f2f58c43/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.767660 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" path="/var/lib/kubelet/pods/fb584a2d-b396-4850-a7b5-3d827c42fe5a/volumes" Jan 21 15:49:00 crc kubenswrapper[5021]: I0121 15:49:00.932344 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070434 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070512 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070546 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070610 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070667 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070725 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070772 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070802 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070861 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrggg\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070885 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: 
\"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.070903 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf\") pod \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\" (UID: \"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.071957 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.072411 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.072778 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.076714 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.076817 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.078378 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg" (OuterVolumeSpecName: "kube-api-access-hrggg") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "kube-api-access-hrggg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.080837 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info" (OuterVolumeSpecName: "pod-info") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.086095 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.108659 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data" (OuterVolumeSpecName: "config-data") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.124936 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.126022 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf" (OuterVolumeSpecName: "server-conf") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.134537 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172866 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172922 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172937 5021 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-pod-info\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172966 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172978 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.172989 5021 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-server-conf\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.173009 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrggg\" (UniqueName: 
\"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-kube-api-access-hrggg\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.173021 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.173031 5021 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.173041 5021 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.195829 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" (UID: "b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.196040 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.271276 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1093d499-bd73-4de4-b999-a7e9835b3124","Type":"ContainerDied","Data":"4256fb6bbf6e559c69ce4bf690d3197064a3e9af1240726ef8a6c3af1cb2b1de"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.271332 5021 scope.go:117] "RemoveContainer" containerID="de590a6c44256e84fcc664627be518c9fb2c460d8c59cdb3123cc99eebe47520" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.271455 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.275825 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.275867 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nltwh\" (UniqueName: \"kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.276400 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284471 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284570 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284618 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284694 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284722 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284746 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285276 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh" (OuterVolumeSpecName: "kube-api-access-nltwh") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "kube-api-access-nltwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.284767 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285595 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285696 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285721 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmjw7\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285748 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285766 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285791 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285821 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config\") pod \"1093d499-bd73-4de4-b999-a7e9835b3124\" (UID: \"1093d499-bd73-4de4-b999-a7e9835b3124\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285847 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285876 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret\") pod 
\"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.285892 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf\") pod \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\" (UID: \"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286158 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286201 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286447 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286463 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286478 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286489 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286501 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nltwh\" (UniqueName: \"kubernetes.io/projected/1093d499-bd73-4de4-b999-a7e9835b3124-kube-api-access-nltwh\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.286513 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.287411 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.288017 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2dff28e1-6d0f-4a7d-8fcf-0edf26e63825","Type":"ContainerDied","Data":"18eda8ac2925e2cf2b28c3175241388e4946c1f363922b567487dfc58652b6bd"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.289721 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.290674 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.288632 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.293576 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.294501 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7" (OuterVolumeSpecName: "kube-api-access-vmjw7") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "kube-api-access-vmjw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.294645 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.295301 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.297581 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.297997 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.298016 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b","Type":"ContainerDied","Data":"1c19d0536ad45b68d5cd75e86c8e9065109f53441517ef388a47279c31e271f0"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.304172 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "mysql-db") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.309676 5021 scope.go:117] "RemoveContainer" containerID="8565907c6fd51c5e42e5d3b76024f497edf5f9e73fad7968004b1b553d69be4c" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.313496 5021 generic.go:334] "Generic (PLEG): container finished" podID="e56d063f-18e5-49af-8bfc-892629a34e88" containerID="fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" exitCode=0 Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.313547 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e56d063f-18e5-49af-8bfc-892629a34e88","Type":"ContainerDied","Data":"fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.316929 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info" (OuterVolumeSpecName: "pod-info") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.321110 5021 generic.go:334] "Generic (PLEG): container finished" podID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerID="7673fa928a9d34d9093a07ad100e4f08c6bac6b0eb9a73dd956508ac3f6d49ca" exitCode=0 Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.321164 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerDied","Data":"7673fa928a9d34d9093a07ad100e4f08c6bac6b0eb9a73dd956508ac3f6d49ca"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.328299 5021 generic.go:334] "Generic (PLEG): container finished" podID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerID="16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" exitCode=0 Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.328536 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0787e96e-5c19-467d-9ad4-ec70202c8cdf","Type":"ContainerDied","Data":"16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.337587 5021 generic.go:334] "Generic (PLEG): container finished" podID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerID="e1e5425a7a11ce797c6d259dce5739f59a0a9337b9e52538e7943644bd38dc3e" exitCode=0 Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.337631 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerDied","Data":"e1e5425a7a11ce797c6d259dce5739f59a0a9337b9e52538e7943644bd38dc3e"} Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.343997 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.348822 5021 scope.go:117] "RemoveContainer" containerID="44ab8303cf36ed1256a72700def0f8fdb1a1e4a5f2dd2a14ca80a744759920ec" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.351061 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data" (OuterVolumeSpecName: "config-data") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.354467 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.362529 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.367090 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf" (OuterVolumeSpecName: "server-conf") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388646 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388719 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388767 5021 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388782 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388795 5021 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388813 5021 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388847 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388858 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388866 5021 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-server-conf\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388874 5021 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-pod-info\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388884 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388893 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1093d499-bd73-4de4-b999-a7e9835b3124-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.388902 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmjw7\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-kube-api-access-vmjw7\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc 
kubenswrapper[5021]: I0121 15:49:01.399427 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "1093d499-bd73-4de4-b999-a7e9835b3124" (UID: "1093d499-bd73-4de4-b999-a7e9835b3124"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.412134 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.414977 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.418887 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.419890 5021 scope.go:117] "RemoveContainer" containerID="4d967a419656e209fc1a4f481f044e1e7ce77cfe738f17ca1985fd75b33cc897" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.439150 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" (UID: "2dff28e1-6d0f-4a7d-8fcf-0edf26e63825"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.485749 5021 scope.go:117] "RemoveContainer" containerID="d55fd3560f293f7b3d5438cf1c04fd0d68375dd2c61252e90dab6e4eb53445b2" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.492818 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs\") pod \"4d076ab0-b0c8-48a0-baa0-589c99376c72\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.494012 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs" (OuterVolumeSpecName: "logs") pod "4d076ab0-b0c8-48a0-baa0-589c99376c72" (UID: "4d076ab0-b0c8-48a0-baa0-589c99376c72"). InnerVolumeSpecName "logs". 
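
For the local-volume PVs above (local-storage07-crc behind the rabbitmq "persistence" claim, local-storage12-crc behind galera's "mysql-db"), teardown has a second phase: after TearDown removes the per-pod mount, reconciler_common.go:286 starts UnmountDevice and operation_generator.go:917 confirms the device-level unmount, roughly 26-30 ms later by the timestamps here. A sketch that measures that gap from a one-record-per-line log on stdin; it ignores the "Jan 21 ... kubenswrapper[5021]:" journald prefix and parses only the klog "I0121 15:49:01.388646" stamp, which carries no year, so only durations are meaningful:

// device_unmount_latency.go: hypothetical measurement helper.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"time"
)

var (
	tsRe    = regexp.MustCompile(`I(\d{4} \d{2}:\d{2}:\d{2}\.\d{6})`)
	startRe = regexp.MustCompile(`UnmountDevice started for volume \\"([\w-]+)\\"`)
	doneRe  = regexp.MustCompile(`UnmountDevice succeeded for volume "([\w-]+)"`)
)

func stamp(line string) (time.Time, bool) {
	m := tsRe.FindStringSubmatch(line)
	if m == nil {
		return time.Time{}, false
	}
	t, err := time.Parse("0102 15:04:05.000000", m[1])
	return t, err == nil
}

func main() {
	started := map[string]time.Time{}
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		line := sc.Text()
		t, ok := stamp(line)
		if !ok {
			continue
		}
		if m := startRe.FindStringSubmatch(line); m != nil {
			started[m[1]] = t
		} else if m := doneRe.FindStringSubmatch(line); m != nil {
			if s, ok := started[m[1]]; ok {
				fmt.Printf("%s: device unmounted in %v\n", m[1], t.Sub(s))
			}
		}
	}
}
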
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.508157 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom\") pod \"4d076ab0-b0c8-48a0-baa0-589c99376c72\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.508246 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle\") pod \"4d076ab0-b0c8-48a0-baa0-589c99376c72\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.508291 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data\") pod \"4d076ab0-b0c8-48a0-baa0-589c99376c72\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.508347 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrsnl\" (UniqueName: \"kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl\") pod \"4d076ab0-b0c8-48a0-baa0-589c99376c72\" (UID: \"4d076ab0-b0c8-48a0-baa0-589c99376c72\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.518398 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl" (OuterVolumeSpecName: "kube-api-access-vrsnl") pod "4d076ab0-b0c8-48a0-baa0-589c99376c72" (UID: "4d076ab0-b0c8-48a0-baa0-589c99376c72"). InnerVolumeSpecName "kube-api-access-vrsnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.518488 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4d076ab0-b0c8-48a0-baa0-589c99376c72" (UID: "4d076ab0-b0c8-48a0-baa0-589c99376c72"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519193 5021 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1093d499-bd73-4de4-b999-a7e9835b3124-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519218 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519231 5021 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519244 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519259 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrsnl\" (UniqueName: \"kubernetes.io/projected/4d076ab0-b0c8-48a0-baa0-589c99376c72-kube-api-access-vrsnl\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519270 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.519281 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d076ab0-b0c8-48a0-baa0-589c99376c72-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.558168 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d076ab0-b0c8-48a0-baa0-589c99376c72" (UID: "4d076ab0-b0c8-48a0-baa0-589c99376c72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.559519 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data" (OuterVolumeSpecName: "config-data") pod "4d076ab0-b0c8-48a0-baa0-589c99376c72" (UID: "4d076ab0-b0c8-48a0-baa0-589c99376c72"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.621614 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.621836 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d076ab0-b0c8-48a0-baa0-589c99376c72-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.630969 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.637689 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.701384 5021 scope.go:117] "RemoveContainer" containerID="d0442856928dc1e5b8f3e11f88a250e0738b0f4e137890a5b28dc4c331684638" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.733850 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.739473 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.811355 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.817520 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.836387 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.931731 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle\") pod \"e56d063f-18e5-49af-8bfc-892629a34e88\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.931790 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") pod \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.931818 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85vfh\" (UniqueName: \"kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh\") pod \"e56d063f-18e5-49af-8bfc-892629a34e88\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.931867 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt9fd\" (UniqueName: \"kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd\") pod \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.932120 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtmt8\" (UniqueName: \"kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8\") pod \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.932172 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-config-data\") pod \"e56d063f-18e5-49af-8bfc-892629a34e88\" (UID: \"e56d063f-18e5-49af-8bfc-892629a34e88\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.932211 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle\") pod \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\" (UID: \"0787e96e-5c19-467d-9ad4-ec70202c8cdf\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.932246 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom\") pod \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.936288 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8" (OuterVolumeSpecName: "kube-api-access-rtmt8") pod "e7a38a9d-65cf-48dd-8f36-44a78a53e48f" (UID: "e7a38a9d-65cf-48dd-8f36-44a78a53e48f"). InnerVolumeSpecName "kube-api-access-rtmt8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.937002 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd" (OuterVolumeSpecName: "kube-api-access-kt9fd") pod "0787e96e-5c19-467d-9ad4-ec70202c8cdf" (UID: "0787e96e-5c19-467d-9ad4-ec70202c8cdf"). InnerVolumeSpecName "kube-api-access-kt9fd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.937695 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e7a38a9d-65cf-48dd-8f36-44a78a53e48f" (UID: "e7a38a9d-65cf-48dd-8f36-44a78a53e48f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.938038 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh" (OuterVolumeSpecName: "kube-api-access-85vfh") pod "e56d063f-18e5-49af-8bfc-892629a34e88" (UID: "e56d063f-18e5-49af-8bfc-892629a34e88"). InnerVolumeSpecName "kube-api-access-85vfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.955095 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data" (OuterVolumeSpecName: "config-data") pod "0787e96e-5c19-467d-9ad4-ec70202c8cdf" (UID: "0787e96e-5c19-467d-9ad4-ec70202c8cdf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.959831 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e56d063f-18e5-49af-8bfc-892629a34e88" (UID: "e56d063f-18e5-49af-8bfc-892629a34e88"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.960333 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0787e96e-5c19-467d-9ad4-ec70202c8cdf" (UID: "0787e96e-5c19-467d-9ad4-ec70202c8cdf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:01 crc kubenswrapper[5021]: I0121 15:49:01.968788 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-config-data" (OuterVolumeSpecName: "config-data") pod "e56d063f-18e5-49af-8bfc-892629a34e88" (UID: "e56d063f-18e5-49af-8bfc-892629a34e88"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.034310 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle\") pod \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.034720 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data\") pod \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.034820 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs\") pod \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\" (UID: \"e7a38a9d-65cf-48dd-8f36-44a78a53e48f\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035156 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035175 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035187 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035200 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0787e96e-5c19-467d-9ad4-ec70202c8cdf-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035212 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85vfh\" (UniqueName: \"kubernetes.io/projected/e56d063f-18e5-49af-8bfc-892629a34e88-kube-api-access-85vfh\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035226 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt9fd\" (UniqueName: \"kubernetes.io/projected/0787e96e-5c19-467d-9ad4-ec70202c8cdf-kube-api-access-kt9fd\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035238 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtmt8\" (UniqueName: \"kubernetes.io/projected/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-kube-api-access-rtmt8\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035249 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56d063f-18e5-49af-8bfc-892629a34e88-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.035378 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs" (OuterVolumeSpecName: "logs") pod "e7a38a9d-65cf-48dd-8f36-44a78a53e48f" 
(UID: "e7a38a9d-65cf-48dd-8f36-44a78a53e48f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.062399 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7a38a9d-65cf-48dd-8f36-44a78a53e48f" (UID: "e7a38a9d-65cf-48dd-8f36-44a78a53e48f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.090941 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data" (OuterVolumeSpecName: "config-data") pod "e7a38a9d-65cf-48dd-8f36-44a78a53e48f" (UID: "e7a38a9d-65cf-48dd-8f36-44a78a53e48f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.137927 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.138012 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.138027 5021 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7a38a9d-65cf-48dd-8f36-44a78a53e48f-logs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.226804 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340649 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340684 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340720 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340745 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340850 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340888 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340925 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gls6\" (UniqueName: \"kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.340946 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys\") pod \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\" (UID: \"b4d4c24c-c623-4b7a-92e2-151d132cdebf\") " Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.344740 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts" (OuterVolumeSpecName: "scripts") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.345492 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6" (OuterVolumeSpecName: "kube-api-access-9gls6") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "kube-api-access-9gls6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.346303 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.348107 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.350696 5021 generic.go:334] "Generic (PLEG): container finished" podID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" containerID="3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72" exitCode=0 Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.350757 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-548db5cc6d-pjhdh" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.350823 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548db5cc6d-pjhdh" event={"ID":"b4d4c24c-c623-4b7a-92e2-151d132cdebf","Type":"ContainerDied","Data":"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.350856 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548db5cc6d-pjhdh" event={"ID":"b4d4c24c-c623-4b7a-92e2-151d132cdebf","Type":"ContainerDied","Data":"d942e6a66a7e445a8aac03e15b7fda374a733e183576c824add7a5f9ebbe357d"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.350874 5021 scope.go:117] "RemoveContainer" containerID="3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.356579 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e56d063f-18e5-49af-8bfc-892629a34e88","Type":"ContainerDied","Data":"fd862b058cdb045584f4bc5c6ec6b0cd3b7643c4dc4aa55c188f531ac7f1d401"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.356601 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.360491 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" event={"ID":"e7a38a9d-65cf-48dd-8f36-44a78a53e48f","Type":"ContainerDied","Data":"3a228a68dd38adae7a3b53ceceb41c5424fa365376dd59acb9682e9b94206d80"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.360540 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76bc56d748-8glcs" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.362374 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0787e96e-5c19-467d-9ad4-ec70202c8cdf","Type":"ContainerDied","Data":"f48f3a48157f783b9f28a75f12a4e2f6551c62f71bff00ff6f521f0d158846a5"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.362712 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.368079 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.368362 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7bc8f89b55-8c6t2" event={"ID":"4d076ab0-b0c8-48a0-baa0-589c99376c72","Type":"ContainerDied","Data":"67819029e1afeeda9cf789c13df258a83255bf3a2c486502ea66c51dcd815078"} Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.373075 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data" (OuterVolumeSpecName: "config-data") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.382487 5021 scope.go:117] "RemoveContainer" containerID="3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72" Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.383387 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72\": container with ID starting with 3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72 not found: ID does not exist" containerID="3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.383423 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72"} err="failed to get container status \"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72\": rpc error: code = NotFound desc = could not find container \"3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72\": container with ID starting with 3f4a4bee362fa146509f572eb2487ecaf6587a8240e667f1b0d30966317f0b72 not found: ID does not exist" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.383443 5021 scope.go:117] "RemoveContainer" containerID="fbc7ac5c64b14be83eb8080ee8b54339e51698653f7de8500a2e5fc7fd361ff2" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.405236 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.405327 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.407415 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.419695 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-76bc56d748-8glcs"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.424419 5021 scope.go:117] "RemoveContainer" containerID="7673fa928a9d34d9093a07ad100e4f08c6bac6b0eb9a73dd956508ac3f6d49ca" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.427205 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b4d4c24c-c623-4b7a-92e2-151d132cdebf" (UID: "b4d4c24c-c623-4b7a-92e2-151d132cdebf"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.433332 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442848 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442896 5021 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442930 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gls6\" (UniqueName: \"kubernetes.io/projected/b4d4c24c-c623-4b7a-92e2-151d132cdebf-kube-api-access-9gls6\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442943 5021 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442955 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442972 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442984 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.442996 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4d4c24c-c623-4b7a-92e2-151d132cdebf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.444016 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.447421 5021 scope.go:117] "RemoveContainer" containerID="233265452bf90deac8f8558f1e900daf7f05dda82bc41b72a507e02c47ad409b" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.450737 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.456323 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.462590 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.471192 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7bc8f89b55-8c6t2"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.482690 5021 scope.go:117] "RemoveContainer" containerID="16060f449a7025ebebb9de9a238ef2f530ca6e0bfb74d144bfcdaf9b91b44f23" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 
15:49:02.510321 5021 scope.go:117] "RemoveContainer" containerID="e1e5425a7a11ce797c6d259dce5739f59a0a9337b9e52538e7943644bd38dc3e" Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.518738 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.519031 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.519227 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.519257 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.520295 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.521372 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.522269 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 21 15:49:02 crc kubenswrapper[5021]: E0121 15:49:02.522305 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" 
podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.539646 5021 scope.go:117] "RemoveContainer" containerID="fcc7d2e930abd6b478f82fcfe23ce92e362592301c9c8fb4f5dea9d2b2bedb88" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.733149 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.753675 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" path="/var/lib/kubelet/pods/0787e96e-5c19-467d-9ad4-ec70202c8cdf/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.754546 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" path="/var/lib/kubelet/pods/1093d499-bd73-4de4-b999-a7e9835b3124/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.755418 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" path="/var/lib/kubelet/pods/2dff28e1-6d0f-4a7d-8fcf-0edf26e63825/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.756562 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" path="/var/lib/kubelet/pods/4d076ab0-b0c8-48a0-baa0-589c99376c72/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.757451 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" path="/var/lib/kubelet/pods/b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.758834 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" path="/var/lib/kubelet/pods/e56d063f-18e5-49af-8bfc-892629a34e88/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.759812 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" path="/var/lib/kubelet/pods/e7a38a9d-65cf-48dd-8f36-44a78a53e48f/volumes" Jan 21 15:49:02 crc kubenswrapper[5021]: I0121 15:49:02.760671 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-548db5cc6d-pjhdh"] Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.440063 5021 generic.go:334] "Generic (PLEG): container finished" podID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerID="8d425e98d39b82e8ff834275afbbd09d41f458b740a780db59954fa6251d14e5" exitCode=0 Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.440674 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerDied","Data":"8d425e98d39b82e8ff834275afbbd09d41f458b740a780db59954fa6251d14e5"} Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.655624 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.748773 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" path="/var/lib/kubelet/pods/b4d4c24c-c623-4b7a-92e2-151d132cdebf/volumes" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.780608 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.780897 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.780984 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781038 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781125 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnfdf\" (UniqueName: \"kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781176 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781206 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781225 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts\") pod \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\" (UID: \"55bbd00b-56a2-42a4-a75a-39daba5e3ba6\") " Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781483 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781605 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781779 5021 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.781799 5021 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.786645 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf" (OuterVolumeSpecName: "kube-api-access-fnfdf") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "kube-api-access-fnfdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.787649 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts" (OuterVolumeSpecName: "scripts") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.825286 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.828516 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.840637 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.866101 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data" (OuterVolumeSpecName: "config-data") pod "55bbd00b-56a2-42a4-a75a-39daba5e3ba6" (UID: "55bbd00b-56a2-42a4-a75a-39daba5e3ba6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.882898 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.882968 5021 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.882980 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnfdf\" (UniqueName: \"kubernetes.io/projected/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-kube-api-access-fnfdf\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.882990 5021 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-config-data\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.883000 5021 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:04 crc kubenswrapper[5021]: I0121 15:49:04.883012 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55bbd00b-56a2-42a4-a75a-39daba5e3ba6-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.453588 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55bbd00b-56a2-42a4-a75a-39daba5e3ba6","Type":"ContainerDied","Data":"b4c373a3cf0a113ad7ec48c766242bd60778dae0ef5dccf264168c260bf583bd"} Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.453639 5021 scope.go:117] "RemoveContainer" containerID="9e0d0434088b24ecb2f7a6a737f0cc34be54ffb9cbf7f5f7696a923ceacf6bc2" Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.453780 5021 util.go:48] "No ready sandbox for pod can be found. 
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.453780 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.477782 5021 scope.go:117] "RemoveContainer" containerID="9e7ddce4cf8d1e90155d69318107e51e50dde636777370df6d9675e6e804665e"
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.486084 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.491209 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.499092 5021 scope.go:117] "RemoveContainer" containerID="8d425e98d39b82e8ff834275afbbd09d41f458b740a780db59954fa6251d14e5"
Jan 21 15:49:05 crc kubenswrapper[5021]: I0121 15:49:05.519967 5021 scope.go:117] "RemoveContainer" containerID="5037559fb60350e2158a6a5f376df3b44cba445783affafde419d451bd46ca8e"
Jan 21 15:49:06 crc kubenswrapper[5021]: I0121 15:49:06.746194 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" path="/var/lib/kubelet/pods/55bbd00b-56a2-42a4-a75a-39daba5e3ba6/volumes"
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.519709 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.520494 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.520829 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.520877 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server"
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.522186 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.523812 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.528754 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:07 crc kubenswrapper[5021]: E0121 15:49:07.529054 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd"
Jan 21 15:49:10 crc kubenswrapper[5021]: E0121 15:49:10.118766 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:10 crc kubenswrapper[5021]: E0121 15:49:10.120564 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:10 crc kubenswrapper[5021]: E0121 15:49:10.121650 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:10 crc kubenswrapper[5021]: E0121 15:49:10.121680 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera"
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.608372 5021 generic.go:334] "Generic (PLEG): container finished" podID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerID="40d25da6134bf8a7e089cfd3c065d27e8bfaa57441da4124734dff449dcf1ca3" exitCode=0
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.608532 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerDied","Data":"40d25da6134bf8a7e089cfd3c065d27e8bfaa57441da4124734dff449dcf1ca3"}
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.608824 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c795c5585-m9bzp" event={"ID":"ddbf76eb-0e2a-4332-b741-0e0b63b60465","Type":"ContainerDied","Data":"5a0c806db16dee09c2027bd3e03417e0fc89a8d6f346f0958538c0a1a8dc48c4"}
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.608851 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a0c806db16dee09c2027bd3e03417e0fc89a8d6f346f0958538c0a1a8dc48c4"
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.659131 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c795c5585-m9bzp"
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767319 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767366 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767412 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc4wf\" (UniqueName: \"kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767463 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767524 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767552 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.767632 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle\") pod \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\" (UID: \"ddbf76eb-0e2a-4332-b741-0e0b63b60465\") "
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.773986 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.774284 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf" (OuterVolumeSpecName: "kube-api-access-cc4wf") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "kube-api-access-cc4wf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.806158 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.811405 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.813675 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config" (OuterVolumeSpecName: "config") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.819653 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.827625 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ddbf76eb-0e2a-4332-b741-0e0b63b60465" (UID: "ddbf76eb-0e2a-4332-b741-0e0b63b60465"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869178 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869224 5021 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869236 5021 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869250 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc4wf\" (UniqueName: \"kubernetes.io/projected/ddbf76eb-0e2a-4332-b741-0e0b63b60465-kube-api-access-cc4wf\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869263 5021 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869273 5021 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-httpd-config\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:10 crc kubenswrapper[5021]: I0121 15:49:10.869284 5021 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddbf76eb-0e2a-4332-b741-0e0b63b60465-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:11 crc kubenswrapper[5021]: I0121 15:49:11.617059 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c795c5585-m9bzp"
Jan 21 15:49:11 crc kubenswrapper[5021]: I0121 15:49:11.651444 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c795c5585-m9bzp"]
Jan 21 15:49:11 crc kubenswrapper[5021]: I0121 15:49:11.656975 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c795c5585-m9bzp"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.518498 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.519283 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.519949 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.519998 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server"
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.520787 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.526784 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.529532 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:12 crc kubenswrapper[5021]: E0121 15:49:12.529640 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd"
Jan 21 15:49:12 crc kubenswrapper[5021]: I0121 15:49:12.746802 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" path="/var/lib/kubelet/pods/ddbf76eb-0e2a-4332-b741-0e0b63b60465/volumes"
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.519502 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.520197 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.520440 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.520495 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server"
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.522422 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.523553 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.527388 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:17 crc kubenswrapper[5021]: E0121 15:49:17.527427 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd"
Jan 21 15:49:20 crc kubenswrapper[5021]: E0121 15:49:20.117950 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:20 crc kubenswrapper[5021]: E0121 15:49:20.119657 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:20 crc kubenswrapper[5021]: E0121 15:49:20.121130 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 21 15:49:20 crc kubenswrapper[5021]: E0121 15:49:20.121174 5021 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera"
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.519173 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204 is running failed: container process not found" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.519172 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.520437 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204 is running failed: container process not found" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.520881 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.520939 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204 is running failed: container process not found" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.521082 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd"
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.521252 5021 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 21 15:49:22 crc kubenswrapper[5021]: E0121 15:49:22.521305 5021 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-bk98m" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server"
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.730288 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bk98m_06ba8703-2573-4c30-82ec-36290cf378f4/ovs-vswitchd/0.log"
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.731438 5021 generic.go:334] "Generic (PLEG): container finished" podID="06ba8703-2573-4c30-82ec-36290cf378f4" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" exitCode=137
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.731517 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerDied","Data":"c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204"}
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.734768 5021 generic.go:334] "Generic (PLEG): container finished" podID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" exitCode=0
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.734853 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerDied","Data":"4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e"}
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.745448 5021 generic.go:334] "Generic (PLEG): container finished" podID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerID="1177abe509b1fc7d36535c70f37ab796f728a73afa2630b7247a37b263d96673" exitCode=137
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.750498 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"1177abe509b1fc7d36535c70f37ab796f728a73afa2630b7247a37b263d96673"}
Jan 21 15:49:22 crc kubenswrapper[5021]: I0121 15:49:22.926799 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047657 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047729 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-combined-ca-bundle\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047771 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k4rv\" (UniqueName: \"kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047863 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047886 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047931 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047971 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.047991 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated\") pod \"4184ba08-6582-4367-abd3-9e9cffb5b716\" (UID: \"4184ba08-6582-4367-abd3-9e9cffb5b716\") "
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.048829 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.048941 5021 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.048902 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.048967 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.049160 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.053456 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv" (OuterVolumeSpecName: "kube-api-access-9k4rv") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "kube-api-access-9k4rv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.061716 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "mysql-db") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.100051 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bk98m_06ba8703-2573-4c30-82ec-36290cf378f4/ovs-vswitchd/0.log" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.101231 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.142874 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "4184ba08-6582-4367-abd3-9e9cffb5b716" (UID: "4184ba08-6582-4367-abd3-9e9cffb5b716"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152346 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152385 5021 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152399 5021 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152410 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152427 5021 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4184ba08-6582-4367-abd3-9e9cffb5b716-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152437 5021 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4184ba08-6582-4367-abd3-9e9cffb5b716-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.152448 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k4rv\" (UniqueName: \"kubernetes.io/projected/4184ba08-6582-4367-abd3-9e9cffb5b716-kube-api-access-9k4rv\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.178693 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253139 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253292 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253415 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253585 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run" (OuterVolumeSpecName: "var-run") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253592 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253794 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5rlq\" (UniqueName: \"kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253831 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.253947 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib\") pod \"06ba8703-2573-4c30-82ec-36290cf378f4\" (UID: \"06ba8703-2573-4c30-82ec-36290cf378f4\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.254552 5021 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.254619 5021 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-run\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.254634 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.254549 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log" (OuterVolumeSpecName: "var-log") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.254571 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib" (OuterVolumeSpecName: "var-lib") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.256768 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts" (OuterVolumeSpecName: "scripts") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.257318 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq" (OuterVolumeSpecName: "kube-api-access-v5rlq") pod "06ba8703-2573-4c30-82ec-36290cf378f4" (UID: "06ba8703-2573-4c30-82ec-36290cf378f4"). InnerVolumeSpecName "kube-api-access-v5rlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.261043 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.355892 5021 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06ba8703-2573-4c30-82ec-36290cf378f4-scripts\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.355973 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5rlq\" (UniqueName: \"kubernetes.io/projected/06ba8703-2573-4c30-82ec-36290cf378f4-kube-api-access-v5rlq\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.355994 5021 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-log\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.356005 5021 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/06ba8703-2573-4c30-82ec-36290cf378f4-var-lib\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.456846 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache\") pod \"c57ca8a9-e2f8-4404-b56f-649297cba618\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.457117 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"c57ca8a9-e2f8-4404-b56f-649297cba618\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.457151 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock\") pod \"c57ca8a9-e2f8-4404-b56f-649297cba618\" (UID: 
\"c57ca8a9-e2f8-4404-b56f-649297cba618\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.457202 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") pod \"c57ca8a9-e2f8-4404-b56f-649297cba618\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.457250 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zn7f\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f\") pod \"c57ca8a9-e2f8-4404-b56f-649297cba618\" (UID: \"c57ca8a9-e2f8-4404-b56f-649297cba618\") " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.458338 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache" (OuterVolumeSpecName: "cache") pod "c57ca8a9-e2f8-4404-b56f-649297cba618" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.458513 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock" (OuterVolumeSpecName: "lock") pod "c57ca8a9-e2f8-4404-b56f-649297cba618" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.460633 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "swift") pod "c57ca8a9-e2f8-4404-b56f-649297cba618" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.460654 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f" (OuterVolumeSpecName: "kube-api-access-4zn7f") pod "c57ca8a9-e2f8-4404-b56f-649297cba618" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618"). InnerVolumeSpecName "kube-api-access-4zn7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.461201 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "c57ca8a9-e2f8-4404-b56f-649297cba618" (UID: "c57ca8a9-e2f8-4404-b56f-649297cba618"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.559515 5021 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.560143 5021 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-lock\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.560190 5021 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.560211 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zn7f\" (UniqueName: \"kubernetes.io/projected/c57ca8a9-e2f8-4404-b56f-649297cba618-kube-api-access-4zn7f\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.560226 5021 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c57ca8a9-e2f8-4404-b56f-649297cba618-cache\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.575575 5021 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.661680 5021 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.765565 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c57ca8a9-e2f8-4404-b56f-649297cba618","Type":"ContainerDied","Data":"9b5df468f1f3ee7d1a2d8b8bfc8134ea166fe80bc890b1ef035b070414f33516"} Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.765622 5021 scope.go:117] "RemoveContainer" containerID="1177abe509b1fc7d36535c70f37ab796f728a73afa2630b7247a37b263d96673" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.765710 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.770600 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-bk98m_06ba8703-2573-4c30-82ec-36290cf378f4/ovs-vswitchd/0.log" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.771411 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-bk98m" event={"ID":"06ba8703-2573-4c30-82ec-36290cf378f4","Type":"ContainerDied","Data":"09e83d565c30e4beb63307b7b985ec1a19e3da74831ae43eebc55f4d1e802e09"} Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.771451 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-bk98m" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.774401 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4184ba08-6582-4367-abd3-9e9cffb5b716","Type":"ContainerDied","Data":"2b11734da378e226d33ccec38e1180b54b38a46a04d8b34460087053ace814d4"} Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.774548 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.793592 5021 scope.go:117] "RemoveContainer" containerID="d0d02b0697c6f0cdbe32b4c15779fa2b7fb9db8ad0d4beee7917a8570d9ce131" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.810979 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.820222 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-bk98m"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.829273 5021 scope.go:117] "RemoveContainer" containerID="0b98874dda34c3adb9708dfa4fddca97d42d24280001e6ca51c29fdf4e04e366" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.849703 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.857198 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.861707 5021 scope.go:117] "RemoveContainer" containerID="21f76b84c77562932f1ebb5a263ddfe5a755ae6258ad955ca59a13307d229d84" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.865046 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.870663 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.904168 5021 scope.go:117] "RemoveContainer" containerID="286ee38dedf5ca3a893d36e49ab99761202c13f3e2d7786385e279604c029ca3" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.928056 5021 scope.go:117] "RemoveContainer" containerID="a89b180c0135b475ed1ba2315e698962a948ee0a359d0c123f97f5bef6cca782" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.949238 5021 scope.go:117] "RemoveContainer" containerID="70645c7bc51255ddd66eef76b13c0c8daa2f66c30285a645c871c40c0117099a" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.969082 5021 scope.go:117] "RemoveContainer" containerID="1ca716b9f11f9eb3707f3cd9724e75ee4eb6224c4c1e84903f22f728f45b5a6e" Jan 21 15:49:23 crc kubenswrapper[5021]: I0121 15:49:23.989026 5021 scope.go:117] "RemoveContainer" containerID="ebcb5861aa10209409b721ea6e382ae9a04e2327d3329449b46709721ed4a126" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.008259 5021 scope.go:117] "RemoveContainer" containerID="de92f672063435b72b37aaebf43b6130f273d9c86bb2fbfd7c96ca15e567638a" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.025274 5021 scope.go:117] "RemoveContainer" containerID="6107623cd8f4072bc502c561748852925819d887d3f75272057a2e95b4ad1df7" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.044155 5021 scope.go:117] "RemoveContainer" containerID="26edaec702317ca592975b15ba32e49f2dbd21f92807d5d36fce7823804ed53c" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.063422 5021 scope.go:117] "RemoveContainer" 
containerID="0d1d92941497d1a0e50ff5085e977bdf5928704f84bf870731efcc8fcb1d2f1c" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.083873 5021 scope.go:117] "RemoveContainer" containerID="c5661f129fdffc8a3bd461399ca660bc553970556f3c6af116113c924c302646" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.107725 5021 scope.go:117] "RemoveContainer" containerID="5801c40336a7430220f5050e7b4c6fc8997538d48d91daa78538847e03eb5b9c" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.128508 5021 scope.go:117] "RemoveContainer" containerID="c9aea0f956b83c345479dd657b4586b966e9d9026a0a0d504fe374b1d7498204" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.153397 5021 scope.go:117] "RemoveContainer" containerID="c51e2000b1b5cdd558697f3c6e26fdd8147721993df1b96e0e8e62f974486b5d" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.181325 5021 scope.go:117] "RemoveContainer" containerID="b438938db2d41c72df61e1de39db4e6370631315aee4d17170236638d0e182e1" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.213970 5021 scope.go:117] "RemoveContainer" containerID="4a59683749a5d31a2fcc97158cd1d9ceb81e127e3f847cce0c972bac9d288d8e" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.236759 5021 scope.go:117] "RemoveContainer" containerID="782dafa1c4c422293d7b99197b47f6a2f821efea69c75664d9a0de6a44a0cdee" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.751652 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" path="/var/lib/kubelet/pods/06ba8703-2573-4c30-82ec-36290cf378f4/volumes" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.753200 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" path="/var/lib/kubelet/pods/4184ba08-6582-4367-abd3-9e9cffb5b716/volumes" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.753854 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" path="/var/lib/kubelet/pods/c57ca8a9-e2f8-4404-b56f-649297cba618/volumes" Jan 21 15:49:24 crc kubenswrapper[5021]: I0121 15:49:24.850460 5021 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod7e624ae4-b10e-41c8-a09d-9b81cc213cf6"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod7e624ae4-b10e-41c8-a09d-9b81cc213cf6] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7e624ae4_b10e_41c8_a09d_9b81cc213cf6.slice" Jan 21 15:49:24 crc kubenswrapper[5021]: E0121 15:49:24.850544 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod7e624ae4-b10e-41c8-a09d-9b81cc213cf6] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod7e624ae4-b10e-41c8-a09d-9b81cc213cf6] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7e624ae4_b10e_41c8_a09d_9b81cc213cf6.slice" pod="openstack/nova-cell1-novncproxy-0" podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" Jan 21 15:49:25 crc kubenswrapper[5021]: I0121 15:49:25.802602 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 21 15:49:25 crc kubenswrapper[5021]: I0121 15:49:25.837065 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:49:25 crc kubenswrapper[5021]: I0121 15:49:25.845500 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 21 15:49:26 crc kubenswrapper[5021]: I0121 15:49:26.749152 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e624ae4-b10e-41c8-a09d-9b81cc213cf6" path="/var/lib/kubelet/pods/7e624ae4-b10e-41c8-a09d-9b81cc213cf6/volumes" Jan 21 15:49:42 crc kubenswrapper[5021]: I0121 15:49:42.356562 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:49:42 crc kubenswrapper[5021]: I0121 15:49:42.357065 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.125453 5021 scope.go:117] "RemoveContainer" containerID="4ff75ecc9879f9ef7c2c63daca2d6a319aca60b1fdeec2f4c91577214aa4ee48" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.163341 5021 scope.go:117] "RemoveContainer" containerID="9805801ced59f3c7c70da4ad1d539dbba64f039ddf070e12fe98335895ee1ee0" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.186888 5021 scope.go:117] "RemoveContainer" containerID="86f743359fcd566d9693e93514f567f4069f7d3c2bd29f4a5c9c25611ca370f2" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.213334 5021 scope.go:117] "RemoveContainer" containerID="c0b27d775f9f55733583fce8881bdf4ecef6a9bf5619a72c6e3c25574d30ebd4" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.246421 5021 scope.go:117] "RemoveContainer" containerID="d8edd25cbef949a767e8ddceea01ada7fe08756600d6369521174db0ff452f7b" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.264714 5021 scope.go:117] "RemoveContainer" containerID="594ea29889407f9a55293be2914d237093beac154f797e7c4a55390c7e75fbb3" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.299883 5021 scope.go:117] "RemoveContainer" containerID="c45563f373ab91b4ce46332e301191ed228473ee66b6a53b2393f1dfc4a4b932" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.319935 5021 scope.go:117] "RemoveContainer" containerID="57118c6cf18fe5a886fca20a8b4b0c89ee968b833cf0660219fa9f27ca939db1" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.338213 5021 scope.go:117] "RemoveContainer" containerID="cc610cc760d0bea04b57dbf46669247b99b993ae8d42b6b8d4657eee8f75076e" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.354667 5021 scope.go:117] "RemoveContainer" containerID="aac21446272396c3ae28db92c60ba6f45b63f13ffdfc027e64f67eb5ae638216" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.371490 5021 scope.go:117] "RemoveContainer" containerID="82a3b32b6f797feb5d696f8b9c2c893157b9cf8fc0d12d892ee901637e843282" Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.394116 5021 scope.go:117] "RemoveContainer" containerID="bfac950ffa328d62ce1ff0c4ca1ef8145a00d3450ad701a49fae566a5600191f" Jan 21 15:49:46 crc 
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.435011 5021 scope.go:117] "RemoveContainer" containerID="e4d8cebebda28f065fc3c55292b1eafcf78e8af726bb99a63b7e36fea292dfcd"
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.454453 5021 scope.go:117] "RemoveContainer" containerID="51d07b884b949c7a2964dc2ea552d145b9ced67ceec056ed46d3fe877a277f2e"
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.477396 5021 scope.go:117] "RemoveContainer" containerID="f379b38122b79af8352111451a3c240ec9ee109be0b0e1e000c38625d2933c70"
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.498719 5021 scope.go:117] "RemoveContainer" containerID="1b8f2c8cd08e817b6588454ef88ddaf1746ece06c08ba02b29cbe9d2437827d0"
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.518750 5021 scope.go:117] "RemoveContainer" containerID="6993e642b208ca097fabad301b80dadf17523b826c5d300e040f520c7609d465"
Jan 21 15:49:46 crc kubenswrapper[5021]: I0121 15:49:46.542447 5021 scope.go:117] "RemoveContainer" containerID="713460210feced6a89b3ba91549f632d927deefdaf7fb750b7607564f3cb34c8"
Jan 21 15:50:12 crc kubenswrapper[5021]: I0121 15:50:12.357436 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 15:50:12 crc kubenswrapper[5021]: I0121 15:50:12.359049 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.001556 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hnr98"]
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.005478 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="mysql-bootstrap"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.005605 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="mysql-bootstrap"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.005695 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-server"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.005751 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-server"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.005839 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.005904 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.005998 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="setup-container"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006058 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="setup-container"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006132 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-notification-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006239 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-notification-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006321 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006387 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006443 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-reaper"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006538 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-reaper"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006624 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-replicator"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006698 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-replicator"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006778 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006833 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.006890 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" containerName="keystone-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.006968 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" containerName="keystone-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007032 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-expirer"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007113 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-expirer"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007212 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-central-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007277 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-central-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007340 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="rsync"
podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="rsync" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007396 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="rsync" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007460 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007534 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007625 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007710 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007792 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="galera" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.007867 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="galera" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.007976 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008049 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008125 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008410 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008488 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008553 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008611 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerName="nova-scheduler-scheduler" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008671 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerName="nova-scheduler-scheduler" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008728 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008780 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008840 5021 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="sg-core" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.008894 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="sg-core" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.008972 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.009041 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-server" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.009098 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.009159 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.009217 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-updater" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.009282 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-updater" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.009379 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.009462 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.010762 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.010853 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.010957 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011029 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener-log" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011113 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011206 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011278 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="swift-recon-cron" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011332 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="swift-recon-cron" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011388 5021 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011440 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-api" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011494 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011569 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-api" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011657 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011734 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.011809 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.011934 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-api" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012009 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" containerName="nova-cell0-conductor-conductor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012077 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" containerName="nova-cell0-conductor-conductor" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012170 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012226 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012282 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="mysql-bootstrap" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012335 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="mysql-bootstrap" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012388 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" containerName="memcached" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012439 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" containerName="memcached" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012497 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server-init" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012545 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server-init" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012601 
5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012653 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-log" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012706 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012757 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012814 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.012868 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.012941 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.013003 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.013061 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.013115 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker-log" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.013837 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.013923 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.013988 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.014042 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.014098 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.014155 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.014240 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.014559 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.014676 
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.014755 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-updater"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.014830 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" containerName="kube-state-metrics"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.014892 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" containerName="kube-state-metrics"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015026 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015123 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015231 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="setup-container"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015308 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="setup-container"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015368 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="proxy-httpd"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015425 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="proxy-httpd"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015489 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerName="nova-cell1-conductor-conductor"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015543 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerName="nova-cell1-conductor-conductor"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015601 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-httpd"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015654 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-httpd"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015733 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015791 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: E0121 15:50:15.015857 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-server"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.015956 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-server"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016407 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016492 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6ed9dcf-812f-4945-ac9d-43839bb27349" containerName="nova-cell1-conductor-conductor"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016579 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0787e96e-5c19-467d-9ad4-ec70202c8cdf" containerName="nova-scheduler-scheduler"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016656 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7a38a9d-65cf-48dd-8f36-44a78a53e48f" containerName="barbican-keystone-listener-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016733 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.016812 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api-log"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017015 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-notification-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017096 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovs-vswitchd"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017170 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017226 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="5073fbf8-f2ef-49e7-8b07-d90b1822b414" containerName="kube-state-metrics"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017297 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb584a2d-b396-4850-a7b5-3d827c42fe5a" containerName="memcached"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017368 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-auditor"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017437 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="ceilometer-central-agent"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.017516 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad66107-0589-4ed8-94dc-fd29f2f58c43" containerName="barbican-api"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.019311 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="1093d499-bd73-4de4-b999-a7e9835b3124" containerName="galera"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.019399 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.019489 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-httpd"
Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.019567 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-httpd"
memory_manager.go:354] "RemoveStaleState removing state" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.019639 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-metadata" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020015 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020113 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020184 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-updater" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020272 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-expirer" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020352 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020426 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4184ba08-6582-4367-abd3-9e9cffb5b716" containerName="galera" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020669 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.020881 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0415e622-e0cf-4097-865a-a0970f2acc07" containerName="glance-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021564 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad959625-d43f-48c3-b42f-d35e63e9af44" containerName="nova-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021648 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="account-reaper" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021729 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ccf7211-3a03-41f1-839a-7bda93e55d4b" containerName="cinder-api-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021804 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="110a1110-f52a-40e4-8402-166be87650a8" containerName="nova-metadata-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021880 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddbf76eb-0e2a-4332-b741-0e0b63b60465" containerName="neutron-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.021971 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5d30216-0406-4ff3-a645-880381c2a661" containerName="ovn-controller" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022048 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="rsync" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022128 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" 
containerName="account-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022201 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dff28e1-6d0f-4a7d-8fcf-0edf26e63825" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022278 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="proxy-httpd" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022331 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4d4c24c-c623-4b7a-92e2-151d132cdebf" containerName="keystone-api" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022409 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022484 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022617 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="55bbd00b-56a2-42a4-a75a-39daba5e3ba6" containerName="sg-core" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022702 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-replicator" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022820 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="469c5416-c102-43c5-8801-502231a86238" containerName="placement-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.022941 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6b5666a-5cee-4b3d-9ac4-2c34962c1d9b" containerName="rabbitmq" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023033 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="446aadfb-ac91-4335-9bac-4f8d7663ab6a" containerName="glance-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023137 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="swift-recon-cron" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023215 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d076ab0-b0c8-48a0-baa0-589c99376c72" containerName="barbican-worker-log" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023300 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="e56d063f-18e5-49af-8bfc-892629a34e88" containerName="nova-cell0-conductor-conductor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023380 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-auditor" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023455 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="object-updater" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023515 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ba8703-2573-4c30-82ec-36290cf378f4" containerName="ovsdb-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.023586 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57ca8a9-e2f8-4404-b56f-649297cba618" containerName="container-server" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.025122 5021 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hnr98"] Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.025327 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.137070 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.137133 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.137233 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbl66\" (UniqueName: \"kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.238979 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.239038 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.239179 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbl66\" (UniqueName: \"kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.239573 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.239573 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.261327 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbl66\" (UniqueName: \"kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66\") pod \"community-operators-hnr98\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.356818 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:15 crc kubenswrapper[5021]: I0121 15:50:15.887470 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hnr98"] Jan 21 15:50:16 crc kubenswrapper[5021]: I0121 15:50:16.243479 5021 generic.go:334] "Generic (PLEG): container finished" podID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerID="253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5" exitCode=0 Jan 21 15:50:16 crc kubenswrapper[5021]: I0121 15:50:16.243759 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerDied","Data":"253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5"} Jan 21 15:50:16 crc kubenswrapper[5021]: I0121 15:50:16.243790 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerStarted","Data":"d0a80bbc69a23b34c3af151cf53980594444039aa3435596d315194e5b5da0cf"} Jan 21 15:50:18 crc kubenswrapper[5021]: I0121 15:50:18.258064 5021 generic.go:334] "Generic (PLEG): container finished" podID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerID="5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4" exitCode=0 Jan 21 15:50:18 crc kubenswrapper[5021]: I0121 15:50:18.258117 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerDied","Data":"5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4"} Jan 21 15:50:19 crc kubenswrapper[5021]: I0121 15:50:19.267958 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerStarted","Data":"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2"} Jan 21 15:50:19 crc kubenswrapper[5021]: I0121 15:50:19.289607 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hnr98" podStartSLOduration=2.642686546 podStartE2EDuration="5.289586662s" podCreationTimestamp="2026-01-21 15:50:14 +0000 UTC" firstStartedPulling="2026-01-21 15:50:16.245420459 +0000 UTC m=+1557.780534348" lastFinishedPulling="2026-01-21 15:50:18.892320575 +0000 UTC m=+1560.427434464" observedRunningTime="2026-01-21 15:50:19.283347239 +0000 UTC m=+1560.818461158" watchObservedRunningTime="2026-01-21 15:50:19.289586662 +0000 UTC m=+1560.824700561" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.351452 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.353731 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.357112 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.357268 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.377749 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.415430 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.506741 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.506832 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.506928 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bbfk\" (UniqueName: \"kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.608163 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.608244 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bbfk\" (UniqueName: \"kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.608335 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.608848 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities\") pod \"certified-operators-slxmd\" (UID: 
\"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.608865 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.633444 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bbfk\" (UniqueName: \"kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk\") pod \"certified-operators-slxmd\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:25 crc kubenswrapper[5021]: I0121 15:50:25.689206 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:26 crc kubenswrapper[5021]: I0121 15:50:26.206842 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:26 crc kubenswrapper[5021]: I0121 15:50:26.335764 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerStarted","Data":"f4cf774c69ee43d5f606d9cb34ac2adb81784ba184d14b49a62d59a4420095a2"} Jan 21 15:50:26 crc kubenswrapper[5021]: I0121 15:50:26.394785 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:27 crc kubenswrapper[5021]: I0121 15:50:27.343476 5021 generic.go:334] "Generic (PLEG): container finished" podID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerID="68e4ea7a1a485a1df214180ddf1a0ac4f772df8b2eca64241a7b941ab73314e7" exitCode=0 Jan 21 15:50:27 crc kubenswrapper[5021]: I0121 15:50:27.343606 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerDied","Data":"68e4ea7a1a485a1df214180ddf1a0ac4f772df8b2eca64241a7b941ab73314e7"} Jan 21 15:50:27 crc kubenswrapper[5021]: I0121 15:50:27.729004 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hnr98"] Jan 21 15:50:28 crc kubenswrapper[5021]: I0121 15:50:28.354792 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerStarted","Data":"f9d64856415bd5548e07402344a8662cf72766eaafa65b2ee8aef61d414061c7"} Jan 21 15:50:29 crc kubenswrapper[5021]: I0121 15:50:29.364448 5021 generic.go:334] "Generic (PLEG): container finished" podID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerID="f9d64856415bd5548e07402344a8662cf72766eaafa65b2ee8aef61d414061c7" exitCode=0 Jan 21 15:50:29 crc kubenswrapper[5021]: I0121 15:50:29.364771 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hnr98" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="registry-server" containerID="cri-o://0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2" gracePeriod=2 Jan 21 15:50:29 crc kubenswrapper[5021]: I0121 15:50:29.366311 5021 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerDied","Data":"f9d64856415bd5548e07402344a8662cf72766eaafa65b2ee8aef61d414061c7"} Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.335028 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.378458 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerStarted","Data":"f52d6962e082850219f71eb3a2ad9123f3fa1ac5fc639b5f48dc35569d0ab380"} Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.382720 5021 generic.go:334] "Generic (PLEG): container finished" podID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerID="0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2" exitCode=0 Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.382785 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerDied","Data":"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2"} Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.382823 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hnr98" event={"ID":"c0fe5022-120f-42ca-b8a3-45790cea1e35","Type":"ContainerDied","Data":"d0a80bbc69a23b34c3af151cf53980594444039aa3435596d315194e5b5da0cf"} Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.382844 5021 scope.go:117] "RemoveContainer" containerID="0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.382926 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hnr98" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.406671 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-slxmd" podStartSLOduration=2.852824981 podStartE2EDuration="5.406647366s" podCreationTimestamp="2026-01-21 15:50:25 +0000 UTC" firstStartedPulling="2026-01-21 15:50:27.345248511 +0000 UTC m=+1568.880362400" lastFinishedPulling="2026-01-21 15:50:29.899070896 +0000 UTC m=+1571.434184785" observedRunningTime="2026-01-21 15:50:30.404117606 +0000 UTC m=+1571.939231495" watchObservedRunningTime="2026-01-21 15:50:30.406647366 +0000 UTC m=+1571.941761255" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.412443 5021 scope.go:117] "RemoveContainer" containerID="5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.436235 5021 scope.go:117] "RemoveContainer" containerID="253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.460467 5021 scope.go:117] "RemoveContainer" containerID="0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2" Jan 21 15:50:30 crc kubenswrapper[5021]: E0121 15:50:30.460811 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2\": container with ID starting with 0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2 not found: ID does not exist" containerID="0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.460844 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2"} err="failed to get container status \"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2\": rpc error: code = NotFound desc = could not find container \"0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2\": container with ID starting with 0d9a1271aa6ffe55c2d832116200076a51170b1af240f8bd647357bc5e97b0a2 not found: ID does not exist" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.460867 5021 scope.go:117] "RemoveContainer" containerID="5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4" Jan 21 15:50:30 crc kubenswrapper[5021]: E0121 15:50:30.461180 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4\": container with ID starting with 5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4 not found: ID does not exist" containerID="5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.461227 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4"} err="failed to get container status \"5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4\": rpc error: code = NotFound desc = could not find container \"5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4\": container with ID starting with 5c4aba4f2a0965386eeb3c5144a6e9b5a4732d694a8782fcd0c059863ed960a4 not found: ID does not exist" Jan 21 
15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.461260 5021 scope.go:117] "RemoveContainer" containerID="253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5" Jan 21 15:50:30 crc kubenswrapper[5021]: E0121 15:50:30.461693 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5\": container with ID starting with 253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5 not found: ID does not exist" containerID="253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.461719 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5"} err="failed to get container status \"253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5\": rpc error: code = NotFound desc = could not find container \"253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5\": container with ID starting with 253daeb291c88fed74a36b24d25158405eb6dc3190b735cc9599ad05570e46d5 not found: ID does not exist" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.505143 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbl66\" (UniqueName: \"kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66\") pod \"c0fe5022-120f-42ca-b8a3-45790cea1e35\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.505255 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content\") pod \"c0fe5022-120f-42ca-b8a3-45790cea1e35\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.505301 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities\") pod \"c0fe5022-120f-42ca-b8a3-45790cea1e35\" (UID: \"c0fe5022-120f-42ca-b8a3-45790cea1e35\") " Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.506240 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities" (OuterVolumeSpecName: "utilities") pod "c0fe5022-120f-42ca-b8a3-45790cea1e35" (UID: "c0fe5022-120f-42ca-b8a3-45790cea1e35"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.511056 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66" (OuterVolumeSpecName: "kube-api-access-bbl66") pod "c0fe5022-120f-42ca-b8a3-45790cea1e35" (UID: "c0fe5022-120f-42ca-b8a3-45790cea1e35"). InnerVolumeSpecName "kube-api-access-bbl66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.564555 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0fe5022-120f-42ca-b8a3-45790cea1e35" (UID: "c0fe5022-120f-42ca-b8a3-45790cea1e35"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.606598 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.606638 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0fe5022-120f-42ca-b8a3-45790cea1e35-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.606652 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbl66\" (UniqueName: \"kubernetes.io/projected/c0fe5022-120f-42ca-b8a3-45790cea1e35-kube-api-access-bbl66\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.736525 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hnr98"] Jan 21 15:50:30 crc kubenswrapper[5021]: I0121 15:50:30.750038 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hnr98"] Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.542793 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:32 crc kubenswrapper[5021]: E0121 15:50:32.543134 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="extract-content" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.543148 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="extract-content" Jan 21 15:50:32 crc kubenswrapper[5021]: E0121 15:50:32.543172 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="registry-server" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.543178 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="registry-server" Jan 21 15:50:32 crc kubenswrapper[5021]: E0121 15:50:32.543190 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="extract-utilities" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.543197 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="extract-utilities" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.543368 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" containerName="registry-server" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.544564 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.559203 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.653006 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.653288 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhjm9\" (UniqueName: \"kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.653537 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.746122 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0fe5022-120f-42ca-b8a3-45790cea1e35" path="/var/lib/kubelet/pods/c0fe5022-120f-42ca-b8a3-45790cea1e35/volumes" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.755117 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhjm9\" (UniqueName: \"kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.755212 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.755271 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.755705 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.755775 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities\") 
pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.779006 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhjm9\" (UniqueName: \"kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9\") pod \"redhat-marketplace-pzm2l\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:32 crc kubenswrapper[5021]: I0121 15:50:32.865256 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:33 crc kubenswrapper[5021]: I0121 15:50:33.455288 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:33 crc kubenswrapper[5021]: W0121 15:50:33.456791 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc728c32_6ec6_4a23_8a80_e44cfe63b09c.slice/crio-70ea0c4c7d067bbeb5e1ac27d32e0353a31b8682368304259e2df59e39f08076 WatchSource:0}: Error finding container 70ea0c4c7d067bbeb5e1ac27d32e0353a31b8682368304259e2df59e39f08076: Status 404 returned error can't find the container with id 70ea0c4c7d067bbeb5e1ac27d32e0353a31b8682368304259e2df59e39f08076 Jan 21 15:50:34 crc kubenswrapper[5021]: I0121 15:50:34.413362 5021 generic.go:334] "Generic (PLEG): container finished" podID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerID="9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed" exitCode=0 Jan 21 15:50:34 crc kubenswrapper[5021]: I0121 15:50:34.413421 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerDied","Data":"9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed"} Jan 21 15:50:34 crc kubenswrapper[5021]: I0121 15:50:34.413708 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerStarted","Data":"70ea0c4c7d067bbeb5e1ac27d32e0353a31b8682368304259e2df59e39f08076"} Jan 21 15:50:35 crc kubenswrapper[5021]: I0121 15:50:35.690149 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:35 crc kubenswrapper[5021]: I0121 15:50:35.691462 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:35 crc kubenswrapper[5021]: I0121 15:50:35.757441 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:36 crc kubenswrapper[5021]: I0121 15:50:36.433123 5021 generic.go:334] "Generic (PLEG): container finished" podID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerID="f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd" exitCode=0 Jan 21 15:50:36 crc kubenswrapper[5021]: I0121 15:50:36.435183 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerDied","Data":"f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd"} Jan 21 15:50:36 crc kubenswrapper[5021]: I0121 15:50:36.484269 5021 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:37 crc kubenswrapper[5021]: I0121 15:50:37.446227 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerStarted","Data":"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39"} Jan 21 15:50:37 crc kubenswrapper[5021]: I0121 15:50:37.476308 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pzm2l" podStartSLOduration=3.005259616 podStartE2EDuration="5.476284379s" podCreationTimestamp="2026-01-21 15:50:32 +0000 UTC" firstStartedPulling="2026-01-21 15:50:34.414863604 +0000 UTC m=+1575.949977493" lastFinishedPulling="2026-01-21 15:50:36.885888367 +0000 UTC m=+1578.421002256" observedRunningTime="2026-01-21 15:50:37.473381382 +0000 UTC m=+1579.008495261" watchObservedRunningTime="2026-01-21 15:50:37.476284379 +0000 UTC m=+1579.011398268" Jan 21 15:50:38 crc kubenswrapper[5021]: I0121 15:50:38.134201 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:39 crc kubenswrapper[5021]: I0121 15:50:39.461074 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-slxmd" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="registry-server" containerID="cri-o://f52d6962e082850219f71eb3a2ad9123f3fa1ac5fc639b5f48dc35569d0ab380" gracePeriod=2 Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.356731 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.357034 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.357095 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.357858 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.357944 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" gracePeriod=600 Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.865757 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.866115 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:42 crc kubenswrapper[5021]: I0121 15:50:42.909011 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:43 crc kubenswrapper[5021]: I0121 15:50:43.496041 5021 generic.go:334] "Generic (PLEG): container finished" podID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerID="f52d6962e082850219f71eb3a2ad9123f3fa1ac5fc639b5f48dc35569d0ab380" exitCode=0 Jan 21 15:50:43 crc kubenswrapper[5021]: I0121 15:50:43.497252 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerDied","Data":"f52d6962e082850219f71eb3a2ad9123f3fa1ac5fc639b5f48dc35569d0ab380"} Jan 21 15:50:43 crc kubenswrapper[5021]: I0121 15:50:43.538562 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:43 crc kubenswrapper[5021]: I0121 15:50:43.934482 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.204432 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.220322 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities\") pod \"ac98c530-4c15-4d36-b5bb-1216caadb18d\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.220377 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content\") pod \"ac98c530-4c15-4d36-b5bb-1216caadb18d\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.220424 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bbfk\" (UniqueName: \"kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk\") pod \"ac98c530-4c15-4d36-b5bb-1216caadb18d\" (UID: \"ac98c530-4c15-4d36-b5bb-1216caadb18d\") " Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.221886 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities" (OuterVolumeSpecName: "utilities") pod "ac98c530-4c15-4d36-b5bb-1216caadb18d" (UID: "ac98c530-4c15-4d36-b5bb-1216caadb18d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.230662 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk" (OuterVolumeSpecName: "kube-api-access-9bbfk") pod "ac98c530-4c15-4d36-b5bb-1216caadb18d" (UID: "ac98c530-4c15-4d36-b5bb-1216caadb18d"). InnerVolumeSpecName "kube-api-access-9bbfk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.273527 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac98c530-4c15-4d36-b5bb-1216caadb18d" (UID: "ac98c530-4c15-4d36-b5bb-1216caadb18d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.323084 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.323119 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac98c530-4c15-4d36-b5bb-1216caadb18d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.323132 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bbfk\" (UniqueName: \"kubernetes.io/projected/ac98c530-4c15-4d36-b5bb-1216caadb18d-kube-api-access-9bbfk\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:44 crc kubenswrapper[5021]: E0121 15:50:44.431156 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.508548 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-slxmd" event={"ID":"ac98c530-4c15-4d36-b5bb-1216caadb18d","Type":"ContainerDied","Data":"f4cf774c69ee43d5f606d9cb34ac2adb81784ba184d14b49a62d59a4420095a2"} Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.508597 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-slxmd" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.508613 5021 scope.go:117] "RemoveContainer" containerID="f52d6962e082850219f71eb3a2ad9123f3fa1ac5fc639b5f48dc35569d0ab380" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.512555 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" exitCode=0 Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.512628 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8"} Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.513131 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:50:44 crc kubenswrapper[5021]: E0121 15:50:44.513396 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.538461 5021 scope.go:117] "RemoveContainer" containerID="f9d64856415bd5548e07402344a8662cf72766eaafa65b2ee8aef61d414061c7" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.556525 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.568535 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-slxmd"] Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.574621 5021 scope.go:117] "RemoveContainer" containerID="68e4ea7a1a485a1df214180ddf1a0ac4f772df8b2eca64241a7b941ab73314e7" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.594277 5021 scope.go:117] "RemoveContainer" containerID="05d959e8221a0471293433aa813fc9a057fa0942334f34a10dd166457b8ac583" Jan 21 15:50:44 crc kubenswrapper[5021]: I0121 15:50:44.750246 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" path="/var/lib/kubelet/pods/ac98c530-4c15-4d36-b5bb-1216caadb18d/volumes" Jan 21 15:50:45 crc kubenswrapper[5021]: I0121 15:50:45.525598 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pzm2l" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="registry-server" containerID="cri-o://68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39" gracePeriod=2 Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.439608 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.534175 5021 generic.go:334] "Generic (PLEG): container finished" podID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerID="68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39" exitCode=0 Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.534475 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerDied","Data":"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39"} Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.534586 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pzm2l" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.535241 5021 scope.go:117] "RemoveContainer" containerID="68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.535137 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pzm2l" event={"ID":"dc728c32-6ec6-4a23-8a80-e44cfe63b09c","Type":"ContainerDied","Data":"70ea0c4c7d067bbeb5e1ac27d32e0353a31b8682368304259e2df59e39f08076"} Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.556634 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities\") pod \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.556731 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content\") pod \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.556761 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhjm9\" (UniqueName: \"kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9\") pod \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\" (UID: \"dc728c32-6ec6-4a23-8a80-e44cfe63b09c\") " Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.558530 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities" (OuterVolumeSpecName: "utilities") pod "dc728c32-6ec6-4a23-8a80-e44cfe63b09c" (UID: "dc728c32-6ec6-4a23-8a80-e44cfe63b09c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.559856 5021 scope.go:117] "RemoveContainer" containerID="f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.567242 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9" (OuterVolumeSpecName: "kube-api-access-bhjm9") pod "dc728c32-6ec6-4a23-8a80-e44cfe63b09c" (UID: "dc728c32-6ec6-4a23-8a80-e44cfe63b09c"). InnerVolumeSpecName "kube-api-access-bhjm9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.602144 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc728c32-6ec6-4a23-8a80-e44cfe63b09c" (UID: "dc728c32-6ec6-4a23-8a80-e44cfe63b09c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.608335 5021 scope.go:117] "RemoveContainer" containerID="9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.639472 5021 scope.go:117] "RemoveContainer" containerID="68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39" Jan 21 15:50:46 crc kubenswrapper[5021]: E0121 15:50:46.640159 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39\": container with ID starting with 68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39 not found: ID does not exist" containerID="68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.640215 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39"} err="failed to get container status \"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39\": rpc error: code = NotFound desc = could not find container \"68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39\": container with ID starting with 68c5ce0e656bbf74b5f3dd269980e3e6a89c7d43196be3da80ec9021c8c79b39 not found: ID does not exist" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.640256 5021 scope.go:117] "RemoveContainer" containerID="f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd" Jan 21 15:50:46 crc kubenswrapper[5021]: E0121 15:50:46.640801 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd\": container with ID starting with f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd not found: ID does not exist" containerID="f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.640837 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd"} err="failed to get container status \"f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd\": rpc error: code = NotFound desc = could not find container \"f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd\": container with ID starting with f7c150a2e6edeb50e1048b6b0cc9f8d3b43f4f26ae53e2cc3aab83b3d8278dbd not found: ID does not exist" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.640867 5021 scope.go:117] "RemoveContainer" containerID="9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed" Jan 21 15:50:46 crc kubenswrapper[5021]: E0121 15:50:46.641336 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed\": container with ID starting with 9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed not found: ID does not exist" containerID="9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.641384 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed"} err="failed to get container status \"9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed\": rpc error: code = NotFound desc = could not find container \"9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed\": container with ID starting with 9659b9ee2146c5bee8e9f98cc689ca108a046ebc78b6a2aeb5f058a77fb342ed not found: ID does not exist" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.658037 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.658075 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhjm9\" (UniqueName: \"kubernetes.io/projected/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-kube-api-access-bhjm9\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.658090 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc728c32-6ec6-4a23-8a80-e44cfe63b09c-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.856006 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:46 crc kubenswrapper[5021]: I0121 15:50:46.862259 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pzm2l"] Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.357136 5021 scope.go:117] "RemoveContainer" containerID="2222207d1f91ccab2e1591175304e36104367aaaea7641b601745cce585ca9c3" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.380615 5021 scope.go:117] "RemoveContainer" containerID="6423618e4f798127b221f0020865806b64603cacc671521fc8e021719922d114" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.404297 5021 scope.go:117] "RemoveContainer" containerID="31eb86a8dd09e080838036bb26c536e93153e4ab7d25d69884f6816ae53e5ab2" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.424488 5021 scope.go:117] "RemoveContainer" containerID="f185e33f65cebb8b90363af17b53d1053a78bb996999011fdf8dc5805f5075e4" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.471535 5021 scope.go:117] "RemoveContainer" containerID="9a286bca13698cf3723eccf10cd3533714ef03b0b034523d4b239312a8d255a7" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.492024 5021 scope.go:117] "RemoveContainer" containerID="c71262d50510701f84d14102a9000a8be2165c803d4f6eb3c3946ce234d084ae" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.518547 5021 scope.go:117] "RemoveContainer" containerID="4da65adcb8ccfeb9a613436dbfa586305e06ea8f16f79bf23435b620e6b5c598" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.558309 5021 scope.go:117] "RemoveContainer" containerID="d210d69f9cddcd085b4561422c284ffe76fd5025c9fe545ad5a9114be9035384" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.578871 5021 scope.go:117] "RemoveContainer" 
containerID="5819573d359e2d9a1b65039d98dc4e760af3f22687fb7cd64949bd947e539de2" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.616203 5021 scope.go:117] "RemoveContainer" containerID="d9a507933f6db91750f321b78dd88b1f0d89e634d83ca2dd3c6ff7e83051f0c9" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.647585 5021 scope.go:117] "RemoveContainer" containerID="120163cff46fcf4d06479ef6e02fb2122698e67e2d06d3253bcc4438b94f5573" Jan 21 15:50:47 crc kubenswrapper[5021]: I0121 15:50:47.684417 5021 scope.go:117] "RemoveContainer" containerID="1381cb9e98b9d58aee20132248f39b0e3835aeefc14aa3be3101489ad1904c97" Jan 21 15:50:48 crc kubenswrapper[5021]: I0121 15:50:48.748211 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" path="/var/lib/kubelet/pods/dc728c32-6ec6-4a23-8a80-e44cfe63b09c/volumes" Jan 21 15:50:55 crc kubenswrapper[5021]: I0121 15:50:55.737830 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:50:55 crc kubenswrapper[5021]: E0121 15:50:55.738604 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:51:09 crc kubenswrapper[5021]: I0121 15:51:09.738375 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:51:09 crc kubenswrapper[5021]: E0121 15:51:09.739122 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:51:21 crc kubenswrapper[5021]: I0121 15:51:21.738228 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:51:21 crc kubenswrapper[5021]: E0121 15:51:21.739153 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:51:35 crc kubenswrapper[5021]: I0121 15:51:35.738336 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:51:35 crc kubenswrapper[5021]: E0121 15:51:35.739158 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" 
Jan 21 15:51:46 crc kubenswrapper[5021]: I0121 15:51:46.738239 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:51:46 crc kubenswrapper[5021]: E0121 15:51:46.739249 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:51:47 crc kubenswrapper[5021]: I0121 15:51:47.860830 5021 scope.go:117] "RemoveContainer" containerID="51eec0552e08bfb2c091d10faebfb1b194d09fd936b7af82ef50e2debeffa6c1" Jan 21 15:51:47 crc kubenswrapper[5021]: I0121 15:51:47.896679 5021 scope.go:117] "RemoveContainer" containerID="4db685e2ebf7830711dda7c3e4c0a750beef34f9db60d96ca446d206370a200e" Jan 21 15:51:47 crc kubenswrapper[5021]: I0121 15:51:47.918458 5021 scope.go:117] "RemoveContainer" containerID="835ae94f46e8d3a10de61c425c18bc14595bed85ddf2222528e93073ee8ed02d" Jan 21 15:51:47 crc kubenswrapper[5021]: I0121 15:51:47.936655 5021 scope.go:117] "RemoveContainer" containerID="5e4bbcdf00bc1e50c5704613a05d600796fcf5cc609a68ac31315e5938f7dcfc" Jan 21 15:52:00 crc kubenswrapper[5021]: I0121 15:52:00.739122 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:52:00 crc kubenswrapper[5021]: E0121 15:52:00.741579 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:52:14 crc kubenswrapper[5021]: I0121 15:52:14.738044 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:52:14 crc kubenswrapper[5021]: E0121 15:52:14.738932 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:52:25 crc kubenswrapper[5021]: I0121 15:52:25.738825 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:52:25 crc kubenswrapper[5021]: E0121 15:52:25.739522 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:52:38 crc kubenswrapper[5021]: I0121 15:52:38.742365 5021 scope.go:117] "RemoveContainer" 
containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:52:38 crc kubenswrapper[5021]: E0121 15:52:38.743265 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.007650 5021 scope.go:117] "RemoveContainer" containerID="fc1d0ae289408d89e328587bc4c294526c1aecae909b14446688e9bf7d5d6da7" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.034933 5021 scope.go:117] "RemoveContainer" containerID="40d25da6134bf8a7e089cfd3c065d27e8bfaa57441da4124734dff449dcf1ca3" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.057030 5021 scope.go:117] "RemoveContainer" containerID="6b8005ac26237642083ae21d321912d31a41bc53e8cc8714923e3a28c95e2695" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.075284 5021 scope.go:117] "RemoveContainer" containerID="0d24f64a0731db8d85873c1c608acc97fb5bbe26445980ffbd17a463bbba03f3" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.091492 5021 scope.go:117] "RemoveContainer" containerID="fbb10e55d2fb1f75b2a4d2ea3d962bd38837ca0992ed6675c730962b996dda86" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.114731 5021 scope.go:117] "RemoveContainer" containerID="f2a67cef1ba2db3bd18f3dfff338b23523c5e1f0086e64b2d5a4f0a2940f8a1e" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.140381 5021 scope.go:117] "RemoveContainer" containerID="940f863573ce6eb57381c2fbbca658d251917ef9b33253b0a3d16577b3809021" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.174954 5021 scope.go:117] "RemoveContainer" containerID="13e437facb9eed154cb3a1fc466a26799000944b7d4e3a944a35585bf94ce10b" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.193209 5021 scope.go:117] "RemoveContainer" containerID="71256733ff9fecce00f3c2e2b2d62f457f32792c57777df4cc9c78ee8ade1f0a" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.209957 5021 scope.go:117] "RemoveContainer" containerID="ce41f6ce53276698af88c81dec88a1a28dfd43d7f45ad89f9badad8d1a417043" Jan 21 15:52:48 crc kubenswrapper[5021]: I0121 15:52:48.232595 5021 scope.go:117] "RemoveContainer" containerID="f4109119afa6957e21234d569dc42804535d59145205c52b5588d6791cfc42fc" Jan 21 15:52:50 crc kubenswrapper[5021]: I0121 15:52:50.738758 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:52:50 crc kubenswrapper[5021]: E0121 15:52:50.739472 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:53:04 crc kubenswrapper[5021]: I0121 15:53:04.739091 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:53:04 crc kubenswrapper[5021]: E0121 15:53:04.739842 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:53:17 crc kubenswrapper[5021]: I0121 15:53:17.738464 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:53:17 crc kubenswrapper[5021]: E0121 15:53:17.739265 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:53:29 crc kubenswrapper[5021]: I0121 15:53:29.737743 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:53:29 crc kubenswrapper[5021]: E0121 15:53:29.738536 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:53:43 crc kubenswrapper[5021]: I0121 15:53:43.738570 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:53:43 crc kubenswrapper[5021]: E0121 15:53:43.739459 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:53:48 crc kubenswrapper[5021]: I0121 15:53:48.356775 5021 scope.go:117] "RemoveContainer" containerID="43652315ff48ea4407b52df2509f36ccd3219420f681b4fe2028c844c527758b" Jan 21 15:53:48 crc kubenswrapper[5021]: I0121 15:53:48.401728 5021 scope.go:117] "RemoveContainer" containerID="8ffd7c56590e0ef552ab69c79953dfb064b804430a4f5bc577f323b3dfcea70a" Jan 21 15:53:56 crc kubenswrapper[5021]: I0121 15:53:56.738276 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:53:56 crc kubenswrapper[5021]: E0121 15:53:56.739099 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:54:08 crc kubenswrapper[5021]: I0121 15:54:08.741744 5021 scope.go:117] "RemoveContainer" 
containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:54:08 crc kubenswrapper[5021]: E0121 15:54:08.742545 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:54:21 crc kubenswrapper[5021]: I0121 15:54:21.739417 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:54:21 crc kubenswrapper[5021]: E0121 15:54:21.740391 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:54:36 crc kubenswrapper[5021]: I0121 15:54:36.737849 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:54:36 crc kubenswrapper[5021]: E0121 15:54:36.738857 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:54:48 crc kubenswrapper[5021]: I0121 15:54:48.473784 5021 scope.go:117] "RemoveContainer" containerID="a3133938f14e8324e72398c04b176cec55efcab2ec1daa967c36bab3517e94d2" Jan 21 15:54:48 crc kubenswrapper[5021]: I0121 15:54:48.491955 5021 scope.go:117] "RemoveContainer" containerID="57acaf70f1630d479893a569ee9512f44f17d7e93296529ac9b7d31b0b49df2b" Jan 21 15:54:48 crc kubenswrapper[5021]: I0121 15:54:48.526254 5021 scope.go:117] "RemoveContainer" containerID="24df4912a140964b1e0d41fa753cf688016ba7e3f96a6f1271116b4a363e6c33" Jan 21 15:54:48 crc kubenswrapper[5021]: I0121 15:54:48.545973 5021 scope.go:117] "RemoveContainer" containerID="b62f3c98443953b412d9b61259124a212fc11ad27cc0955aaff8ed76fd0ab856" Jan 21 15:54:49 crc kubenswrapper[5021]: I0121 15:54:49.737361 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:54:49 crc kubenswrapper[5021]: E0121 15:54:49.737924 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:55:00 crc kubenswrapper[5021]: I0121 15:55:00.738289 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:55:00 crc kubenswrapper[5021]: E0121 15:55:00.740188 5021 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:55:13 crc kubenswrapper[5021]: I0121 15:55:13.737999 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:55:13 crc kubenswrapper[5021]: E0121 15:55:13.738835 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:55:25 crc kubenswrapper[5021]: I0121 15:55:25.738371 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:55:25 crc kubenswrapper[5021]: E0121 15:55:25.739181 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:55:36 crc kubenswrapper[5021]: I0121 15:55:36.744951 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:55:36 crc kubenswrapper[5021]: E0121 15:55:36.745503 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 15:55:49 crc kubenswrapper[5021]: I0121 15:55:49.737693 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:55:50 crc kubenswrapper[5021]: I0121 15:55:50.516699 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658"} Jan 21 15:58:12 crc kubenswrapper[5021]: I0121 15:58:12.357077 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:58:12 crc kubenswrapper[5021]: I0121 15:58:12.357696 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" 
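
Note: the repeating "RemoveContainer" / "Error syncing pod, skipping" pairs above are the kubelet sync loop being throttled by CrashLoopBackOff: while the restart back-off is in force each sync attempt is skipped, and the container is only started again at 15:55:50, once the "back-off 5m0s" window has elapsed. Below is a minimal sketch of the doubling back-off this implies; the 10s initial delay and 5-minute cap match the upstream kubelet defaults as far as I know, but the code is an illustration, not the kubelet's actual implementation.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const (
            initialDelay = 10 * time.Second // assumed kubelet default for crash back-off
            maxDelay     = 5 * time.Minute  // the "back-off 5m0s" cap seen in the log
        )
        delay := initialDelay
        for attempt := 1; attempt <= 8; attempt++ {
            fmt.Printf("restart attempt %d: wait %v\n", attempt, delay)
            delay *= 2 // double after every crash...
            if delay > maxDelay {
                delay = maxDelay // ...but never beyond the cap
            }
        }
    }
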
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:58:42 crc kubenswrapper[5021]: I0121 15:58:42.357475 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:58:42 crc kubenswrapper[5021]: I0121 15:58:42.359062 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.677247 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678058 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="extract-utilities" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678071 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="extract-utilities" Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678085 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="extract-content" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678091 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="extract-content" Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678104 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="extract-content" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678115 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="extract-content" Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678132 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678140 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678151 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678158 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: E0121 15:58:49.678174 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="extract-utilities" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678183 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="extract-utilities" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678351 5021 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ac98c530-4c15-4d36-b5bb-1216caadb18d" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.678369 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc728c32-6ec6-4a23-8a80-e44cfe63b09c" containerName="registry-server" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.680152 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.691369 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.853151 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.853242 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smhzx\" (UniqueName: \"kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.853294 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.954583 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.954645 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smhzx\" (UniqueName: \"kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.954674 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.955261 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.955514 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.977257 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smhzx\" (UniqueName: \"kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx\") pod \"redhat-operators-j4qsk\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:49 crc kubenswrapper[5021]: I0121 15:58:49.999024 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:58:50 crc kubenswrapper[5021]: I0121 15:58:50.254548 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:58:50 crc kubenswrapper[5021]: I0121 15:58:50.791073 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerStarted","Data":"afc5dbddfa43e112a74299e7e63a1d408b55b51f462ba55ef886870704ddd8c4"} Jan 21 15:58:53 crc kubenswrapper[5021]: I0121 15:58:53.815791 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerStarted","Data":"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395"} Jan 21 15:58:54 crc kubenswrapper[5021]: I0121 15:58:54.827917 5021 generic.go:334] "Generic (PLEG): container finished" podID="a44bfa16-8142-4883-abb8-9879019542d5" containerID="79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395" exitCode=0 Jan 21 15:58:54 crc kubenswrapper[5021]: I0121 15:58:54.828029 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerDied","Data":"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395"} Jan 21 15:58:54 crc kubenswrapper[5021]: I0121 15:58:54.829698 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 15:58:56 crc kubenswrapper[5021]: I0121 15:58:56.852806 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerStarted","Data":"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80"} Jan 21 15:58:57 crc kubenswrapper[5021]: I0121 15:58:57.862596 5021 generic.go:334] "Generic (PLEG): container finished" podID="a44bfa16-8142-4883-abb8-9879019542d5" containerID="64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80" exitCode=0 Jan 21 15:58:57 crc kubenswrapper[5021]: I0121 15:58:57.862655 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerDied","Data":"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80"} Jan 21 15:59:00 crc kubenswrapper[5021]: I0121 15:59:00.883831 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" 
event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerStarted","Data":"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084"} Jan 21 15:59:00 crc kubenswrapper[5021]: I0121 15:59:00.908066 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j4qsk" podStartSLOduration=7.394412118 podStartE2EDuration="11.908048392s" podCreationTimestamp="2026-01-21 15:58:49 +0000 UTC" firstStartedPulling="2026-01-21 15:58:54.829434009 +0000 UTC m=+2076.364547898" lastFinishedPulling="2026-01-21 15:58:59.343070283 +0000 UTC m=+2080.878184172" observedRunningTime="2026-01-21 15:59:00.906901081 +0000 UTC m=+2082.442014970" watchObservedRunningTime="2026-01-21 15:59:00.908048392 +0000 UTC m=+2082.443162281" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:09.999952 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:10.000495 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:10.036233 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:10.983787 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:12.357690 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:12.357771 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:12.357824 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:12.358528 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:12.358601 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658" gracePeriod=600 Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:13.984121 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" 
containerID="e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658" exitCode=0 Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:13.984147 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658"} Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:13.984486 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b"} Jan 21 15:59:13 crc kubenswrapper[5021]: I0121 15:59:13.984509 5021 scope.go:117] "RemoveContainer" containerID="8e9be393254a6396e6490f6f969239e3a23b32799a213435ac8de18190c629f8" Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.470555 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.471079 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j4qsk" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="registry-server" containerID="cri-o://25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084" gracePeriod=2 Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.947867 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.998596 5021 generic.go:334] "Generic (PLEG): container finished" podID="a44bfa16-8142-4883-abb8-9879019542d5" containerID="25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084" exitCode=0 Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.998659 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerDied","Data":"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084"} Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.998700 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j4qsk" Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.998725 5021 scope.go:117] "RemoveContainer" containerID="25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084" Jan 21 15:59:14 crc kubenswrapper[5021]: I0121 15:59:14.998709 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4qsk" event={"ID":"a44bfa16-8142-4883-abb8-9879019542d5","Type":"ContainerDied","Data":"afc5dbddfa43e112a74299e7e63a1d408b55b51f462ba55ef886870704ddd8c4"} Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.018499 5021 scope.go:117] "RemoveContainer" containerID="64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.043580 5021 scope.go:117] "RemoveContainer" containerID="79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.067229 5021 scope.go:117] "RemoveContainer" containerID="25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084" Jan 21 15:59:15 crc kubenswrapper[5021]: E0121 15:59:15.067897 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084\": container with ID starting with 25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084 not found: ID does not exist" containerID="25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.067956 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084"} err="failed to get container status \"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084\": rpc error: code = NotFound desc = could not find container \"25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084\": container with ID starting with 25eaea3f2c8ac2168f678650763018dd8e0753c9f5ec2629a8e9ae3c39c3b084 not found: ID does not exist" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.068046 5021 scope.go:117] "RemoveContainer" containerID="64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80" Jan 21 15:59:15 crc kubenswrapper[5021]: E0121 15:59:15.068819 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80\": container with ID starting with 64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80 not found: ID does not exist" containerID="64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.068861 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80"} err="failed to get container status \"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80\": rpc error: code = NotFound desc = could not find container \"64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80\": container with ID starting with 64d0ccdbf2f7d6d667d08c9fd569fbb4036cdefb6ec14f75568bc4afe09afa80 not found: ID does not exist" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.068883 5021 scope.go:117] "RemoveContainer" 
containerID="79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395" Jan 21 15:59:15 crc kubenswrapper[5021]: E0121 15:59:15.069989 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395\": container with ID starting with 79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395 not found: ID does not exist" containerID="79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.070030 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395"} err="failed to get container status \"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395\": rpc error: code = NotFound desc = could not find container \"79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395\": container with ID starting with 79be804dc75c16ae9211bc0dfb9d29ccbe5a3c50dc0dbfaf7704b18e562f6395 not found: ID does not exist" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.122763 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content\") pod \"a44bfa16-8142-4883-abb8-9879019542d5\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.122830 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smhzx\" (UniqueName: \"kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx\") pod \"a44bfa16-8142-4883-abb8-9879019542d5\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.122859 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities\") pod \"a44bfa16-8142-4883-abb8-9879019542d5\" (UID: \"a44bfa16-8142-4883-abb8-9879019542d5\") " Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.124128 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities" (OuterVolumeSpecName: "utilities") pod "a44bfa16-8142-4883-abb8-9879019542d5" (UID: "a44bfa16-8142-4883-abb8-9879019542d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.131116 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx" (OuterVolumeSpecName: "kube-api-access-smhzx") pod "a44bfa16-8142-4883-abb8-9879019542d5" (UID: "a44bfa16-8142-4883-abb8-9879019542d5"). InnerVolumeSpecName "kube-api-access-smhzx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.224661 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smhzx\" (UniqueName: \"kubernetes.io/projected/a44bfa16-8142-4883-abb8-9879019542d5-kube-api-access-smhzx\") on node \"crc\" DevicePath \"\"" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.224720 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.263250 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a44bfa16-8142-4883-abb8-9879019542d5" (UID: "a44bfa16-8142-4883-abb8-9879019542d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.332900 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a44bfa16-8142-4883-abb8-9879019542d5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.337976 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:59:15 crc kubenswrapper[5021]: I0121 15:59:15.344325 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j4qsk"] Jan 21 15:59:15 crc kubenswrapper[5021]: E0121 15:59:15.407990 5021 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda44bfa16_8142_4883_abb8_9879019542d5.slice/crio-afc5dbddfa43e112a74299e7e63a1d408b55b51f462ba55ef886870704ddd8c4\": RecentStats: unable to find data in memory cache]" Jan 21 15:59:16 crc kubenswrapper[5021]: I0121 15:59:16.748774 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a44bfa16-8142-4883-abb8-9879019542d5" path="/var/lib/kubelet/pods/a44bfa16-8142-4883-abb8-9879019542d5/volumes" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.148774 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd"] Jan 21 16:00:00 crc kubenswrapper[5021]: E0121 16:00:00.151672 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="registry-server" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.151822 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="registry-server" Jan 21 16:00:00 crc kubenswrapper[5021]: E0121 16:00:00.151900 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="extract-utilities" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.152000 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="extract-utilities" Jan 21 16:00:00 crc kubenswrapper[5021]: E0121 16:00:00.152094 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="extract-content" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 
16:00:00.152167 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="extract-content" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.152435 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a44bfa16-8142-4883-abb8-9879019542d5" containerName="registry-server" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.154183 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.161458 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.163131 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.180544 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd"] Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.215954 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf2g2\" (UniqueName: \"kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.216376 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.216582 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.317898 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.318004 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf2g2\" (UniqueName: \"kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.318044 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.319177 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.324585 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.335771 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf2g2\" (UniqueName: \"kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2\") pod \"collect-profiles-29483520-w4dwd\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.483370 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:00 crc kubenswrapper[5021]: I0121 16:00:00.716227 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd"] Jan 21 16:00:01 crc kubenswrapper[5021]: I0121 16:00:01.330879 5021 generic.go:334] "Generic (PLEG): container finished" podID="8975a103-c0c6-470d-a4f3-e7bee2308bb6" containerID="45068c5e8c72bdb74bb03a6b5a36960522413cee4976f58db362ba6e0b08ed85" exitCode=0 Jan 21 16:00:01 crc kubenswrapper[5021]: I0121 16:00:01.330957 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" event={"ID":"8975a103-c0c6-470d-a4f3-e7bee2308bb6","Type":"ContainerDied","Data":"45068c5e8c72bdb74bb03a6b5a36960522413cee4976f58db362ba6e0b08ed85"} Jan 21 16:00:01 crc kubenswrapper[5021]: I0121 16:00:01.331272 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" event={"ID":"8975a103-c0c6-470d-a4f3-e7bee2308bb6","Type":"ContainerStarted","Data":"9b6269bae44e405be345006372a8bff626659505de763ca42764e47f73a523cd"} Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.617591 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.652479 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume\") pod \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.652995 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume\") pod \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.653064 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf2g2\" (UniqueName: \"kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2\") pod \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\" (UID: \"8975a103-c0c6-470d-a4f3-e7bee2308bb6\") " Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.653727 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume" (OuterVolumeSpecName: "config-volume") pod "8975a103-c0c6-470d-a4f3-e7bee2308bb6" (UID: "8975a103-c0c6-470d-a4f3-e7bee2308bb6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.659547 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2" (OuterVolumeSpecName: "kube-api-access-lf2g2") pod "8975a103-c0c6-470d-a4f3-e7bee2308bb6" (UID: "8975a103-c0c6-470d-a4f3-e7bee2308bb6"). InnerVolumeSpecName "kube-api-access-lf2g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.660174 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8975a103-c0c6-470d-a4f3-e7bee2308bb6" (UID: "8975a103-c0c6-470d-a4f3-e7bee2308bb6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.754590 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8975a103-c0c6-470d-a4f3-e7bee2308bb6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.754637 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf2g2\" (UniqueName: \"kubernetes.io/projected/8975a103-c0c6-470d-a4f3-e7bee2308bb6-kube-api-access-lf2g2\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:02 crc kubenswrapper[5021]: I0121 16:00:02.754653 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8975a103-c0c6-470d-a4f3-e7bee2308bb6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:03 crc kubenswrapper[5021]: I0121 16:00:03.371468 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" event={"ID":"8975a103-c0c6-470d-a4f3-e7bee2308bb6","Type":"ContainerDied","Data":"9b6269bae44e405be345006372a8bff626659505de763ca42764e47f73a523cd"} Jan 21 16:00:03 crc kubenswrapper[5021]: I0121 16:00:03.371527 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b6269bae44e405be345006372a8bff626659505de763ca42764e47f73a523cd" Jan 21 16:00:03 crc kubenswrapper[5021]: I0121 16:00:03.371671 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd" Jan 21 16:00:03 crc kubenswrapper[5021]: I0121 16:00:03.701722 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"] Jan 21 16:00:03 crc kubenswrapper[5021]: I0121 16:00:03.708256 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483475-7s6jf"] Jan 21 16:00:04 crc kubenswrapper[5021]: I0121 16:00:04.748661 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71337f59-d4e5-47da-9d8e-759bd17cfdc3" path="/var/lib/kubelet/pods/71337f59-d4e5-47da-9d8e-759bd17cfdc3/volumes" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.030849 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:25 crc kubenswrapper[5021]: E0121 16:00:25.031632 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8975a103-c0c6-470d-a4f3-e7bee2308bb6" containerName="collect-profiles" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.031644 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8975a103-c0c6-470d-a4f3-e7bee2308bb6" containerName="collect-profiles" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.031769 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="8975a103-c0c6-470d-a4f3-e7bee2308bb6" containerName="collect-profiles" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.032703 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.048538 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.198790 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.198850 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.198882 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rjqm\" (UniqueName: \"kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.300643 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.300698 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rjqm\" (UniqueName: \"kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.300816 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.301308 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.301445 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.321455 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7rjqm\" (UniqueName: \"kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm\") pod \"certified-operators-7lbb4\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.352269 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:25 crc kubenswrapper[5021]: I0121 16:00:25.783500 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:26 crc kubenswrapper[5021]: I0121 16:00:26.537668 5021 generic.go:334] "Generic (PLEG): container finished" podID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerID="914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7" exitCode=0 Jan 21 16:00:26 crc kubenswrapper[5021]: I0121 16:00:26.537722 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerDied","Data":"914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7"} Jan 21 16:00:26 crc kubenswrapper[5021]: I0121 16:00:26.537754 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerStarted","Data":"aeaac66d215d6712073e000e7bf15149b5188def4e4c26aabdebf93ef0bf3205"} Jan 21 16:00:28 crc kubenswrapper[5021]: I0121 16:00:28.553001 5021 generic.go:334] "Generic (PLEG): container finished" podID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerID="a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd" exitCode=0 Jan 21 16:00:28 crc kubenswrapper[5021]: I0121 16:00:28.553485 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerDied","Data":"a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd"} Jan 21 16:00:29 crc kubenswrapper[5021]: I0121 16:00:29.562806 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerStarted","Data":"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc"} Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.689051 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7lbb4" podStartSLOduration=6.21160889 podStartE2EDuration="8.689030188s" podCreationTimestamp="2026-01-21 16:00:25 +0000 UTC" firstStartedPulling="2026-01-21 16:00:26.539436584 +0000 UTC m=+2168.074550473" lastFinishedPulling="2026-01-21 16:00:29.016857882 +0000 UTC m=+2170.551971771" observedRunningTime="2026-01-21 16:00:29.587358517 +0000 UTC m=+2171.122472426" watchObservedRunningTime="2026-01-21 16:00:33.689030188 +0000 UTC m=+2175.224144097" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.692704 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.694751 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.700323 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.818632 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.818683 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jzq8\" (UniqueName: \"kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.818781 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.920324 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.920402 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.920444 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jzq8\" (UniqueName: \"kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.921354 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.921674 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:33 crc kubenswrapper[5021]: I0121 16:00:33.943297 5021 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5jzq8\" (UniqueName: \"kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8\") pod \"redhat-marketplace-2bccq\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:34 crc kubenswrapper[5021]: I0121 16:00:34.013088 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:34 crc kubenswrapper[5021]: I0121 16:00:34.436060 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:34 crc kubenswrapper[5021]: I0121 16:00:34.594761 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerStarted","Data":"8539d43777db46ffed0ac8467d896b9472370daef1bf70d40a240edd644cc97d"} Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.353262 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.353664 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.399642 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.602534 5021 generic.go:334] "Generic (PLEG): container finished" podID="2902a74c-98a0-4af4-890d-24376ba0d549" containerID="ddcf5c2224cc285ef010c013a5a0540991db90b19c2ea50fe6af9053fdb4ca59" exitCode=0 Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.602650 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerDied","Data":"ddcf5c2224cc285ef010c013a5a0540991db90b19c2ea50fe6af9053fdb4ca59"} Jan 21 16:00:35 crc kubenswrapper[5021]: I0121 16:00:35.642319 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:36 crc kubenswrapper[5021]: I0121 16:00:36.610259 5021 generic.go:334] "Generic (PLEG): container finished" podID="2902a74c-98a0-4af4-890d-24376ba0d549" containerID="ae586a21eda107c3e8564d2762718cd3a3371c50fbd98c1382b98a2c22dbf804" exitCode=0 Jan 21 16:00:36 crc kubenswrapper[5021]: I0121 16:00:36.610421 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerDied","Data":"ae586a21eda107c3e8564d2762718cd3a3371c50fbd98c1382b98a2c22dbf804"} Jan 21 16:00:37 crc kubenswrapper[5021]: I0121 16:00:37.673174 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:37 crc kubenswrapper[5021]: I0121 16:00:37.673701 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7lbb4" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="registry-server" containerID="cri-o://14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc" gracePeriod=2 Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.109826 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.285760 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content\") pod \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.286312 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rjqm\" (UniqueName: \"kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm\") pod \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.286441 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities\") pod \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\" (UID: \"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf\") " Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.287326 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities" (OuterVolumeSpecName: "utilities") pod "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" (UID: "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.293644 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm" (OuterVolumeSpecName: "kube-api-access-7rjqm") pod "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" (UID: "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf"). InnerVolumeSpecName "kube-api-access-7rjqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.332490 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" (UID: "20e17f5b-1fb5-4bec-8c60-31d58c37f0cf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.388577 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.388633 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.388649 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rjqm\" (UniqueName: \"kubernetes.io/projected/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf-kube-api-access-7rjqm\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.627792 5021 generic.go:334] "Generic (PLEG): container finished" podID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerID="14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc" exitCode=0 Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.627840 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerDied","Data":"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc"} Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.627864 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7lbb4" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.627902 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7lbb4" event={"ID":"20e17f5b-1fb5-4bec-8c60-31d58c37f0cf","Type":"ContainerDied","Data":"aeaac66d215d6712073e000e7bf15149b5188def4e4c26aabdebf93ef0bf3205"} Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.628002 5021 scope.go:117] "RemoveContainer" containerID="14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.631747 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerStarted","Data":"27fd089f9aa3d18208f1e0f2d6a3ff2d633d213b5fac9efaabe811d976ddaa8f"} Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.649227 5021 scope.go:117] "RemoveContainer" containerID="a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.671569 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2bccq" podStartSLOduration=3.771606392 podStartE2EDuration="5.671545946s" podCreationTimestamp="2026-01-21 16:00:33 +0000 UTC" firstStartedPulling="2026-01-21 16:00:35.604131768 +0000 UTC m=+2177.139245657" lastFinishedPulling="2026-01-21 16:00:37.504071322 +0000 UTC m=+2179.039185211" observedRunningTime="2026-01-21 16:00:38.651424676 +0000 UTC m=+2180.186538585" watchObservedRunningTime="2026-01-21 16:00:38.671545946 +0000 UTC m=+2180.206659835" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.677533 5021 scope.go:117] "RemoveContainer" containerID="914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.680498 5021 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.686630 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7lbb4"] Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.703724 5021 scope.go:117] "RemoveContainer" containerID="14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc" Jan 21 16:00:38 crc kubenswrapper[5021]: E0121 16:00:38.704316 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc\": container with ID starting with 14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc not found: ID does not exist" containerID="14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.704470 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc"} err="failed to get container status \"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc\": rpc error: code = NotFound desc = could not find container \"14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc\": container with ID starting with 14aa51d2939d243f2392f5717a0594d5b957e249e266f1f9e6519512ab6e03cc not found: ID does not exist" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.704602 5021 scope.go:117] "RemoveContainer" containerID="a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd" Jan 21 16:00:38 crc kubenswrapper[5021]: E0121 16:00:38.705173 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd\": container with ID starting with a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd not found: ID does not exist" containerID="a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.705219 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd"} err="failed to get container status \"a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd\": rpc error: code = NotFound desc = could not find container \"a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd\": container with ID starting with a273f73da2e479586a131f6f31d69680301568a0238882f659f067de148369cd not found: ID does not exist" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.705253 5021 scope.go:117] "RemoveContainer" containerID="914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7" Jan 21 16:00:38 crc kubenswrapper[5021]: E0121 16:00:38.705674 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7\": container with ID starting with 914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7 not found: ID does not exist" containerID="914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.705706 5021 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7"} err="failed to get container status \"914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7\": rpc error: code = NotFound desc = could not find container \"914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7\": container with ID starting with 914465e2fb3be99571638e35d67f7095f6e4ff012b61b90eb398f9de0f9985c7 not found: ID does not exist" Jan 21 16:00:38 crc kubenswrapper[5021]: I0121 16:00:38.746839 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" path="/var/lib/kubelet/pods/20e17f5b-1fb5-4bec-8c60-31d58c37f0cf/volumes" Jan 21 16:00:44 crc kubenswrapper[5021]: I0121 16:00:44.014104 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:44 crc kubenswrapper[5021]: I0121 16:00:44.014487 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:44 crc kubenswrapper[5021]: I0121 16:00:44.056282 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:44 crc kubenswrapper[5021]: I0121 16:00:44.708527 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:44 crc kubenswrapper[5021]: I0121 16:00:44.756181 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:46 crc kubenswrapper[5021]: I0121 16:00:46.681433 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2bccq" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="registry-server" containerID="cri-o://27fd089f9aa3d18208f1e0f2d6a3ff2d633d213b5fac9efaabe811d976ddaa8f" gracePeriod=2 Jan 21 16:00:48 crc kubenswrapper[5021]: I0121 16:00:48.695918 5021 generic.go:334] "Generic (PLEG): container finished" podID="2902a74c-98a0-4af4-890d-24376ba0d549" containerID="27fd089f9aa3d18208f1e0f2d6a3ff2d633d213b5fac9efaabe811d976ddaa8f" exitCode=0 Jan 21 16:00:48 crc kubenswrapper[5021]: I0121 16:00:48.695956 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerDied","Data":"27fd089f9aa3d18208f1e0f2d6a3ff2d633d213b5fac9efaabe811d976ddaa8f"} Jan 21 16:00:48 crc kubenswrapper[5021]: I0121 16:00:48.709943 5021 scope.go:117] "RemoveContainer" containerID="a5bb51c915ed8831067aabe10257efffba87f9dc2079eb6e60ead96e294df68a" Jan 21 16:00:48 crc kubenswrapper[5021]: I0121 16:00:48.945940 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.042626 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jzq8\" (UniqueName: \"kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8\") pod \"2902a74c-98a0-4af4-890d-24376ba0d549\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.042788 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities\") pod \"2902a74c-98a0-4af4-890d-24376ba0d549\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.042818 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content\") pod \"2902a74c-98a0-4af4-890d-24376ba0d549\" (UID: \"2902a74c-98a0-4af4-890d-24376ba0d549\") " Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.043817 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities" (OuterVolumeSpecName: "utilities") pod "2902a74c-98a0-4af4-890d-24376ba0d549" (UID: "2902a74c-98a0-4af4-890d-24376ba0d549"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.048757 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8" (OuterVolumeSpecName: "kube-api-access-5jzq8") pod "2902a74c-98a0-4af4-890d-24376ba0d549" (UID: "2902a74c-98a0-4af4-890d-24376ba0d549"). InnerVolumeSpecName "kube-api-access-5jzq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.069936 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2902a74c-98a0-4af4-890d-24376ba0d549" (UID: "2902a74c-98a0-4af4-890d-24376ba0d549"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.144928 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.144981 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2902a74c-98a0-4af4-890d-24376ba0d549-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.144999 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jzq8\" (UniqueName: \"kubernetes.io/projected/2902a74c-98a0-4af4-890d-24376ba0d549-kube-api-access-5jzq8\") on node \"crc\" DevicePath \"\"" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.703832 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bccq" event={"ID":"2902a74c-98a0-4af4-890d-24376ba0d549","Type":"ContainerDied","Data":"8539d43777db46ffed0ac8467d896b9472370daef1bf70d40a240edd644cc97d"} Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.703892 5021 scope.go:117] "RemoveContainer" containerID="27fd089f9aa3d18208f1e0f2d6a3ff2d633d213b5fac9efaabe811d976ddaa8f" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.703930 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bccq" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.722392 5021 scope.go:117] "RemoveContainer" containerID="ae586a21eda107c3e8564d2762718cd3a3371c50fbd98c1382b98a2c22dbf804" Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.740926 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.748381 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bccq"] Jan 21 16:00:49 crc kubenswrapper[5021]: I0121 16:00:49.770617 5021 scope.go:117] "RemoveContainer" containerID="ddcf5c2224cc285ef010c013a5a0540991db90b19c2ea50fe6af9053fdb4ca59" Jan 21 16:00:50 crc kubenswrapper[5021]: I0121 16:00:50.746552 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" path="/var/lib/kubelet/pods/2902a74c-98a0-4af4-890d-24376ba0d549/volumes" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.501850 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503142 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="extract-utilities" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503162 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="extract-utilities" Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503177 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503183 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503192 5021 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="extract-content" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503197 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="extract-content" Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503206 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="extract-utilities" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503211 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="extract-utilities" Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503234 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503240 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: E0121 16:01:16.503249 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="extract-content" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503255 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="extract-content" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503405 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="2902a74c-98a0-4af4-890d-24376ba0d549" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.503422 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="20e17f5b-1fb5-4bec-8c60-31d58c37f0cf" containerName="registry-server" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.504426 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.525544 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.700278 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.700623 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjpnm\" (UniqueName: \"kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.700778 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.801757 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.801867 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.801898 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjpnm\" (UniqueName: \"kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.802767 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.803081 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:16 crc kubenswrapper[5021]: I0121 16:01:16.839110 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zjpnm\" (UniqueName: \"kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm\") pod \"community-operators-7zsl7\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:17 crc kubenswrapper[5021]: I0121 16:01:17.126435 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:17 crc kubenswrapper[5021]: I0121 16:01:17.552720 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:17 crc kubenswrapper[5021]: I0121 16:01:17.917399 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerStarted","Data":"a71ddae3b13313a65c6539eac5dfddaf3eaa91928ef6046efc3e260df7215559"} Jan 21 16:01:18 crc kubenswrapper[5021]: I0121 16:01:18.929145 5021 generic.go:334] "Generic (PLEG): container finished" podID="17d17810-b824-42a1-a611-a1ece332c34c" containerID="818eeb6e48536cc773a86197a0bd453e1a6b1f8600538bc413bb7d50a984bd79" exitCode=0 Jan 21 16:01:18 crc kubenswrapper[5021]: I0121 16:01:18.929198 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerDied","Data":"818eeb6e48536cc773a86197a0bd453e1a6b1f8600538bc413bb7d50a984bd79"} Jan 21 16:01:21 crc kubenswrapper[5021]: I0121 16:01:21.951412 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerStarted","Data":"f70a9554a8f1e3eb2f69f6a2e5956800b0225773de68bfe8ed61910e6551d8e8"} Jan 21 16:01:22 crc kubenswrapper[5021]: I0121 16:01:22.959533 5021 generic.go:334] "Generic (PLEG): container finished" podID="17d17810-b824-42a1-a611-a1ece332c34c" containerID="f70a9554a8f1e3eb2f69f6a2e5956800b0225773de68bfe8ed61910e6551d8e8" exitCode=0 Jan 21 16:01:22 crc kubenswrapper[5021]: I0121 16:01:22.959584 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerDied","Data":"f70a9554a8f1e3eb2f69f6a2e5956800b0225773de68bfe8ed61910e6551d8e8"} Jan 21 16:01:24 crc kubenswrapper[5021]: I0121 16:01:24.974902 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerStarted","Data":"3ab24f60bca74827dc3ad89efc021fab611bdf2cde49d980593826b491fb59ea"} Jan 21 16:01:24 crc kubenswrapper[5021]: I0121 16:01:24.998101 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7zsl7" podStartSLOduration=3.401316403 podStartE2EDuration="8.998078962s" podCreationTimestamp="2026-01-21 16:01:16 +0000 UTC" firstStartedPulling="2026-01-21 16:01:18.931266572 +0000 UTC m=+2220.466380461" lastFinishedPulling="2026-01-21 16:01:24.528029131 +0000 UTC m=+2226.063143020" observedRunningTime="2026-01-21 16:01:24.991137983 +0000 UTC m=+2226.526251892" watchObservedRunningTime="2026-01-21 16:01:24.998078962 +0000 UTC m=+2226.533192851" Jan 21 16:01:27 crc kubenswrapper[5021]: I0121 16:01:27.126821 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:27 crc kubenswrapper[5021]: I0121 16:01:27.126876 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:27 crc kubenswrapper[5021]: I0121 16:01:27.169729 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:37 crc kubenswrapper[5021]: I0121 16:01:37.173322 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:37 crc kubenswrapper[5021]: I0121 16:01:37.216054 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:38 crc kubenswrapper[5021]: I0121 16:01:38.125112 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7zsl7" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="registry-server" containerID="cri-o://3ab24f60bca74827dc3ad89efc021fab611bdf2cde49d980593826b491fb59ea" gracePeriod=2 Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.144028 5021 generic.go:334] "Generic (PLEG): container finished" podID="17d17810-b824-42a1-a611-a1ece332c34c" containerID="3ab24f60bca74827dc3ad89efc021fab611bdf2cde49d980593826b491fb59ea" exitCode=0 Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.144095 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerDied","Data":"3ab24f60bca74827dc3ad89efc021fab611bdf2cde49d980593826b491fb59ea"} Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.347251 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.362566 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content\") pod \"17d17810-b824-42a1-a611-a1ece332c34c\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.362629 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjpnm\" (UniqueName: \"kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm\") pod \"17d17810-b824-42a1-a611-a1ece332c34c\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.362767 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities\") pod \"17d17810-b824-42a1-a611-a1ece332c34c\" (UID: \"17d17810-b824-42a1-a611-a1ece332c34c\") " Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.372044 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities" (OuterVolumeSpecName: "utilities") pod "17d17810-b824-42a1-a611-a1ece332c34c" (UID: "17d17810-b824-42a1-a611-a1ece332c34c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.372899 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm" (OuterVolumeSpecName: "kube-api-access-zjpnm") pod "17d17810-b824-42a1-a611-a1ece332c34c" (UID: "17d17810-b824-42a1-a611-a1ece332c34c"). InnerVolumeSpecName "kube-api-access-zjpnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.432984 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17d17810-b824-42a1-a611-a1ece332c34c" (UID: "17d17810-b824-42a1-a611-a1ece332c34c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.467790 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.467830 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjpnm\" (UniqueName: \"kubernetes.io/projected/17d17810-b824-42a1-a611-a1ece332c34c-kube-api-access-zjpnm\") on node \"crc\" DevicePath \"\"" Jan 21 16:01:40 crc kubenswrapper[5021]: I0121 16:01:40.467843 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17d17810-b824-42a1-a611-a1ece332c34c-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.158985 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7zsl7" event={"ID":"17d17810-b824-42a1-a611-a1ece332c34c","Type":"ContainerDied","Data":"a71ddae3b13313a65c6539eac5dfddaf3eaa91928ef6046efc3e260df7215559"} Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.159076 5021 scope.go:117] "RemoveContainer" containerID="3ab24f60bca74827dc3ad89efc021fab611bdf2cde49d980593826b491fb59ea" Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.159266 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7zsl7" Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.193142 5021 scope.go:117] "RemoveContainer" containerID="f70a9554a8f1e3eb2f69f6a2e5956800b0225773de68bfe8ed61910e6551d8e8" Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.195375 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.209452 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7zsl7"] Jan 21 16:01:41 crc kubenswrapper[5021]: I0121 16:01:41.226056 5021 scope.go:117] "RemoveContainer" containerID="818eeb6e48536cc773a86197a0bd453e1a6b1f8600538bc413bb7d50a984bd79" Jan 21 16:01:42 crc kubenswrapper[5021]: I0121 16:01:42.357534 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:01:42 crc kubenswrapper[5021]: I0121 16:01:42.357980 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:01:42 crc kubenswrapper[5021]: I0121 16:01:42.749466 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17d17810-b824-42a1-a611-a1ece332c34c" path="/var/lib/kubelet/pods/17d17810-b824-42a1-a611-a1ece332c34c/volumes" Jan 21 16:02:12 crc kubenswrapper[5021]: I0121 16:02:12.357324 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:02:12 crc kubenswrapper[5021]: I0121 16:02:12.359861 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.357161 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.357737 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.357786 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.358485 5021 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.358542 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" gracePeriod=600 Jan 21 16:02:42 crc kubenswrapper[5021]: E0121 16:02:42.498786 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.611457 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" exitCode=0 Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.611497 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b"} Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.611536 5021 scope.go:117] "RemoveContainer" containerID="e4a3d830500ab956ae0ff074856c73285c32d046519d5f6ebfbd1dfcfef47658" Jan 21 16:02:42 crc kubenswrapper[5021]: I0121 16:02:42.612098 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:02:42 crc kubenswrapper[5021]: E0121 16:02:42.612308 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:02:55 crc kubenswrapper[5021]: I0121 16:02:55.738431 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:02:55 crc kubenswrapper[5021]: E0121 16:02:55.739317 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:03:09 crc kubenswrapper[5021]: I0121 16:03:09.738074 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 
16:03:09 crc kubenswrapper[5021]: E0121 16:03:09.739384 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:03:22 crc kubenswrapper[5021]: I0121 16:03:22.744729 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:03:22 crc kubenswrapper[5021]: E0121 16:03:22.745553 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:03:35 crc kubenswrapper[5021]: I0121 16:03:35.738393 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:03:35 crc kubenswrapper[5021]: E0121 16:03:35.739268 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:03:48 crc kubenswrapper[5021]: I0121 16:03:48.741687 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:03:48 crc kubenswrapper[5021]: E0121 16:03:48.742396 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:04:00 crc kubenswrapper[5021]: I0121 16:04:00.737540 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:04:00 crc kubenswrapper[5021]: E0121 16:04:00.738333 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:04:13 crc kubenswrapper[5021]: I0121 16:04:13.737517 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:04:13 crc kubenswrapper[5021]: E0121 16:04:13.738276 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:04:25 crc kubenswrapper[5021]: I0121 16:04:25.737688 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:04:25 crc kubenswrapper[5021]: E0121 16:04:25.738598 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:04:40 crc kubenswrapper[5021]: I0121 16:04:40.738061 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:04:40 crc kubenswrapper[5021]: E0121 16:04:40.738859 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:04:53 crc kubenswrapper[5021]: I0121 16:04:53.738016 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:04:53 crc kubenswrapper[5021]: E0121 16:04:53.738990 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:05:07 crc kubenswrapper[5021]: I0121 16:05:07.738142 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:05:07 crc kubenswrapper[5021]: E0121 16:05:07.738956 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:05:20 crc kubenswrapper[5021]: I0121 16:05:20.737762 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:05:20 crc kubenswrapper[5021]: E0121 16:05:20.738739 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:05:32 crc kubenswrapper[5021]: I0121 16:05:32.737440 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:05:32 crc kubenswrapper[5021]: E0121 16:05:32.738150 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:05:43 crc kubenswrapper[5021]: I0121 16:05:43.738471 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:05:43 crc kubenswrapper[5021]: E0121 16:05:43.740598 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:05:58 crc kubenswrapper[5021]: I0121 16:05:58.742604 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:05:58 crc kubenswrapper[5021]: E0121 16:05:58.743460 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:06:09 crc kubenswrapper[5021]: I0121 16:06:09.738426 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:06:09 crc kubenswrapper[5021]: E0121 16:06:09.739730 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:06:22 crc kubenswrapper[5021]: I0121 16:06:22.737771 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:06:22 crc kubenswrapper[5021]: E0121 16:06:22.738630 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:06:37 crc kubenswrapper[5021]: I0121 16:06:37.737421 5021 
scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:06:37 crc kubenswrapper[5021]: E0121 16:06:37.738198 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:06:48 crc kubenswrapper[5021]: I0121 16:06:48.742140 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:06:48 crc kubenswrapper[5021]: E0121 16:06:48.743262 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:07:00 crc kubenswrapper[5021]: I0121 16:07:00.738857 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:07:00 crc kubenswrapper[5021]: E0121 16:07:00.740329 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:07:14 crc kubenswrapper[5021]: I0121 16:07:14.739218 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:07:14 crc kubenswrapper[5021]: E0121 16:07:14.739838 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:07:28 crc kubenswrapper[5021]: I0121 16:07:28.741753 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:07:28 crc kubenswrapper[5021]: E0121 16:07:28.742566 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:07:40 crc kubenswrapper[5021]: I0121 16:07:40.738716 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:07:40 crc kubenswrapper[5021]: E0121 16:07:40.739980 5021 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:07:55 crc kubenswrapper[5021]: I0121 16:07:55.738720 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b" Jan 21 16:07:56 crc kubenswrapper[5021]: I0121 16:07:56.850965 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8"} Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.059361 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:09:47 crc kubenswrapper[5021]: E0121 16:09:47.060658 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="extract-utilities" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.060677 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="extract-utilities" Jan 21 16:09:47 crc kubenswrapper[5021]: E0121 16:09:47.060688 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="registry-server" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.060695 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="registry-server" Jan 21 16:09:47 crc kubenswrapper[5021]: E0121 16:09:47.060712 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="extract-content" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.060719 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="extract-content" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.060962 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="17d17810-b824-42a1-a611-a1ece332c34c" containerName="registry-server" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.062092 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.086901 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.199608 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt9zz\" (UniqueName: \"kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.199838 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.199969 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.301684 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt9zz\" (UniqueName: \"kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.301782 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.301836 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.302407 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.302482 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.323665 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mt9zz\" (UniqueName: \"kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz\") pod \"redhat-operators-ldgqc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.386221 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:47 crc kubenswrapper[5021]: I0121 16:09:47.878890 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:09:48 crc kubenswrapper[5021]: I0121 16:09:48.593371 5021 generic.go:334] "Generic (PLEG): container finished" podID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerID="2bf511bc43510b51c67375fde2868b3a8e13fd1f5037e55f5eff5d03f400b196" exitCode=0 Jan 21 16:09:48 crc kubenswrapper[5021]: I0121 16:09:48.593417 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerDied","Data":"2bf511bc43510b51c67375fde2868b3a8e13fd1f5037e55f5eff5d03f400b196"} Jan 21 16:09:48 crc kubenswrapper[5021]: I0121 16:09:48.593452 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerStarted","Data":"675c807d5b03e3d0c9d03af3ed58acb7dc62ecc4e9dd5731a33e3ef4c5a0ecba"} Jan 21 16:09:48 crc kubenswrapper[5021]: I0121 16:09:48.595466 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 16:09:50 crc kubenswrapper[5021]: I0121 16:09:50.608646 5021 generic.go:334] "Generic (PLEG): container finished" podID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerID="93299b59787d57875818eb59d43e15ca8a098d3bd3d096a6b922c4214c081326" exitCode=0 Jan 21 16:09:50 crc kubenswrapper[5021]: I0121 16:09:50.608736 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerDied","Data":"93299b59787d57875818eb59d43e15ca8a098d3bd3d096a6b922c4214c081326"} Jan 21 16:09:51 crc kubenswrapper[5021]: I0121 16:09:51.621483 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerStarted","Data":"c5f2a33f07875fb97230f1f9b6c1df9813c1e9725b4e721a2be6fc5f88fa7ad0"} Jan 21 16:09:51 crc kubenswrapper[5021]: I0121 16:09:51.648352 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ldgqc" podStartSLOduration=1.98388542 podStartE2EDuration="4.648325165s" podCreationTimestamp="2026-01-21 16:09:47 +0000 UTC" firstStartedPulling="2026-01-21 16:09:48.595172585 +0000 UTC m=+2730.130286474" lastFinishedPulling="2026-01-21 16:09:51.25961233 +0000 UTC m=+2732.794726219" observedRunningTime="2026-01-21 16:09:51.639158982 +0000 UTC m=+2733.174272881" watchObservedRunningTime="2026-01-21 16:09:51.648325165 +0000 UTC m=+2733.183439074" Jan 21 16:09:57 crc kubenswrapper[5021]: I0121 16:09:57.387235 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:57 crc kubenswrapper[5021]: I0121 16:09:57.388842 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:57 crc kubenswrapper[5021]: I0121 16:09:57.434990 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:57 crc kubenswrapper[5021]: I0121 16:09:57.699824 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:09:57 crc kubenswrapper[5021]: I0121 16:09:57.744609 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:09:59 crc kubenswrapper[5021]: I0121 16:09:59.670320 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ldgqc" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="registry-server" containerID="cri-o://c5f2a33f07875fb97230f1f9b6c1df9813c1e9725b4e721a2be6fc5f88fa7ad0" gracePeriod=2 Jan 21 16:10:00 crc kubenswrapper[5021]: I0121 16:10:00.681027 5021 generic.go:334] "Generic (PLEG): container finished" podID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerID="c5f2a33f07875fb97230f1f9b6c1df9813c1e9725b4e721a2be6fc5f88fa7ad0" exitCode=0 Jan 21 16:10:00 crc kubenswrapper[5021]: I0121 16:10:00.681079 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerDied","Data":"c5f2a33f07875fb97230f1f9b6c1df9813c1e9725b4e721a2be6fc5f88fa7ad0"} Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.108214 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.238763 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content\") pod \"43bf3674-37b4-47e7-afa4-4d441c477cdc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.238859 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt9zz\" (UniqueName: \"kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz\") pod \"43bf3674-37b4-47e7-afa4-4d441c477cdc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.239015 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities\") pod \"43bf3674-37b4-47e7-afa4-4d441c477cdc\" (UID: \"43bf3674-37b4-47e7-afa4-4d441c477cdc\") " Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.240004 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities" (OuterVolumeSpecName: "utilities") pod "43bf3674-37b4-47e7-afa4-4d441c477cdc" (UID: "43bf3674-37b4-47e7-afa4-4d441c477cdc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.244538 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz" (OuterVolumeSpecName: "kube-api-access-mt9zz") pod "43bf3674-37b4-47e7-afa4-4d441c477cdc" (UID: "43bf3674-37b4-47e7-afa4-4d441c477cdc"). InnerVolumeSpecName "kube-api-access-mt9zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.341118 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.341159 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt9zz\" (UniqueName: \"kubernetes.io/projected/43bf3674-37b4-47e7-afa4-4d441c477cdc-kube-api-access-mt9zz\") on node \"crc\" DevicePath \"\"" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.362325 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43bf3674-37b4-47e7-afa4-4d441c477cdc" (UID: "43bf3674-37b4-47e7-afa4-4d441c477cdc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.442805 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43bf3674-37b4-47e7-afa4-4d441c477cdc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.695898 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ldgqc" event={"ID":"43bf3674-37b4-47e7-afa4-4d441c477cdc","Type":"ContainerDied","Data":"675c807d5b03e3d0c9d03af3ed58acb7dc62ecc4e9dd5731a33e3ef4c5a0ecba"} Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.696261 5021 scope.go:117] "RemoveContainer" containerID="c5f2a33f07875fb97230f1f9b6c1df9813c1e9725b4e721a2be6fc5f88fa7ad0" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.695961 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ldgqc" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.715893 5021 scope.go:117] "RemoveContainer" containerID="93299b59787d57875818eb59d43e15ca8a098d3bd3d096a6b922c4214c081326" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.731782 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.737123 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ldgqc"] Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.749036 5021 scope.go:117] "RemoveContainer" containerID="2bf511bc43510b51c67375fde2868b3a8e13fd1f5037e55f5eff5d03f400b196" Jan 21 16:10:02 crc kubenswrapper[5021]: I0121 16:10:02.750832 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" path="/var/lib/kubelet/pods/43bf3674-37b4-47e7-afa4-4d441c477cdc/volumes" Jan 21 16:10:12 crc kubenswrapper[5021]: I0121 16:10:12.357488 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:10:12 crc kubenswrapper[5021]: I0121 16:10:12.358189 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:10:42 crc kubenswrapper[5021]: I0121 16:10:42.356972 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:10:42 crc kubenswrapper[5021]: I0121 16:10:42.357543 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.357423 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.358067 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.358134 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.358968 5021 
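
The patch_prober/prober pairs above are a failing HTTP liveness probe repeated on a 30s period (16:10:12, 16:10:42, 16:11:12) against http://127.0.0.1:8798/health; each attempt dies with "connection refused". A standalone sketch of that style of check; the endpoint and period come from the log, while the failure threshold of 3 is an assumption for illustration:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP liveness check in the spirit of the prober
// entries above: any transport error (e.g. "connection refused") or a
// non-2xx status counts as a failure.
func probe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	const url = "http://127.0.0.1:8798/health" // endpoint from the log
	failures := 0
	for failures < 3 { // assumed failureThreshold, for illustration only
		if err := probe(url); err != nil {
			failures++
			fmt.Printf("Probe failed (%d/3): %v\n", failures, err)
		} else {
			failures = 0
		}
		time.Sleep(30 * time.Second) // the log entries arrive ~30s apart
	}
	fmt.Println("liveness unhealthy: container would be restarted")
}
```
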
Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.358134 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz"
Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.358968 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 21 16:11:12 crc kubenswrapper[5021]: I0121 16:11:12.359052 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8" gracePeriod=600
Jan 21 16:11:13 crc kubenswrapper[5021]: I0121 16:11:13.163998 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8" exitCode=0
Jan 21 16:11:13 crc kubenswrapper[5021]: I0121 16:11:13.164065 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8"}
Jan 21 16:11:13 crc kubenswrapper[5021]: I0121 16:11:13.164457 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867"}
Jan 21 16:11:13 crc kubenswrapper[5021]: I0121 16:11:13.164487 5021 scope.go:117] "RemoveContainer" containerID="3fdde0f968b05ba58c96e801a9ae3037ca772323be7d0ee8cdd6ab413b75a18b"
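
The SyncLoop ADD/UPDATE/DELETE and PLEG entries throughout this log are the kubelet's internal view of pod lifecycle; the same transitions can be followed from outside by watching the API server. A minimal client-go sketch, assuming a reachable cluster and a standard kubeconfig at the default location:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config; inside a pod, rest.InClusterConfig() would be
	// used instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Watch the namespace these entries come from; each watch event has a
	// counterpart SyncLoop ADD/UPDATE/DELETE line in the kubelet log.
	w, err := cs.CoreV1().Pods("openshift-marketplace").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		fmt.Printf("%s %s phase=%s\n", ev.Type, pod.Name, pod.Status.Phase)
	}
}
```
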
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.766840 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:44 crc kubenswrapper[5021]: E0121 16:11:44.767644 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="extract-content"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.767659 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="extract-content"
Jan 21 16:11:44 crc kubenswrapper[5021]: E0121 16:11:44.767685 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="extract-utilities"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.767691 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="extract-utilities"
Jan 21 16:11:44 crc kubenswrapper[5021]: E0121 16:11:44.767703 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="registry-server"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.767709 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="registry-server"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.767832 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="43bf3674-37b4-47e7-afa4-4d441c477cdc" containerName="registry-server"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.768883 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.778222 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.917542 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.917870 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbfdw\" (UniqueName: \"kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:44 crc kubenswrapper[5021]: I0121 16:11:44.918099 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.019553 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.019670 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbfdw\" (UniqueName: \"kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.019708 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.020189 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.020280 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.040524 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbfdw\" (UniqueName: \"kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw\") pod \"redhat-marketplace-gglkn\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") " pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.096296 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:45 crc kubenswrapper[5021]: I0121 16:11:45.549983 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:46 crc kubenswrapper[5021]: I0121 16:11:46.425207 5021 generic.go:334] "Generic (PLEG): container finished" podID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerID="bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3" exitCode=0
Jan 21 16:11:46 crc kubenswrapper[5021]: I0121 16:11:46.425250 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerDied","Data":"bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3"}
Jan 21 16:11:46 crc kubenswrapper[5021]: I0121 16:11:46.425280 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerStarted","Data":"9b460ead39bcfba173e8a1089056e38c332d1c607e43ec18f856dd6cf7674811"}
Jan 21 16:11:47 crc kubenswrapper[5021]: I0121 16:11:47.433741 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerStarted","Data":"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"}
Jan 21 16:11:48 crc kubenswrapper[5021]: I0121 16:11:48.442309 5021 generic.go:334] "Generic (PLEG): container finished" podID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerID="d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958" exitCode=0
Jan 21 16:11:48 crc kubenswrapper[5021]: I0121 16:11:48.442396 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerDied","Data":"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"}
Jan 21 16:11:48 crc kubenswrapper[5021]: I0121 16:11:48.442674 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerStarted","Data":"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"}
Jan 21 16:11:48 crc kubenswrapper[5021]: I0121 16:11:48.461983 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gglkn" podStartSLOduration=2.926844377 podStartE2EDuration="4.461964363s" podCreationTimestamp="2026-01-21 16:11:44 +0000 UTC" firstStartedPulling="2026-01-21 16:11:46.426823866 +0000 UTC m=+2847.961937755" lastFinishedPulling="2026-01-21 16:11:47.961943852 +0000 UTC m=+2849.497057741" observedRunningTime="2026-01-21 16:11:48.455869061 +0000 UTC m=+2849.990982970" watchObservedRunningTime="2026-01-21 16:11:48.461964363 +0000 UTC m=+2849.997078252"
Jan 21 16:11:55 crc kubenswrapper[5021]: I0121 16:11:55.096737 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:55 crc kubenswrapper[5021]: I0121 16:11:55.098551 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:55 crc kubenswrapper[5021]: I0121 16:11:55.202376 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:55 crc kubenswrapper[5021]: I0121 16:11:55.528444 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:55 crc kubenswrapper[5021]: I0121 16:11:55.575737 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:57 crc kubenswrapper[5021]: I0121 16:11:57.500988 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gglkn" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="registry-server" containerID="cri-o://b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586" gracePeriod=2
Jan 21 16:11:57 crc kubenswrapper[5021]: I0121 16:11:57.894011 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.000754 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content\") pod \"f18b946b-0145-41b9-a840-55b7f6c7d037\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") "
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.000973 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbfdw\" (UniqueName: \"kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw\") pod \"f18b946b-0145-41b9-a840-55b7f6c7d037\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") "
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.001034 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities\") pod \"f18b946b-0145-41b9-a840-55b7f6c7d037\" (UID: \"f18b946b-0145-41b9-a840-55b7f6c7d037\") "
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.002591 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities" (OuterVolumeSpecName: "utilities") pod "f18b946b-0145-41b9-a840-55b7f6c7d037" (UID: "f18b946b-0145-41b9-a840-55b7f6c7d037"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.009726 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw" (OuterVolumeSpecName: "kube-api-access-sbfdw") pod "f18b946b-0145-41b9-a840-55b7f6c7d037" (UID: "f18b946b-0145-41b9-a840-55b7f6c7d037"). InnerVolumeSpecName "kube-api-access-sbfdw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.026322 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f18b946b-0145-41b9-a840-55b7f6c7d037" (UID: "f18b946b-0145-41b9-a840-55b7f6c7d037"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.103648 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbfdw\" (UniqueName: \"kubernetes.io/projected/f18b946b-0145-41b9-a840-55b7f6c7d037-kube-api-access-sbfdw\") on node \"crc\" DevicePath \"\""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.103695 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.103708 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f18b946b-0145-41b9-a840-55b7f6c7d037-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.512450 5021 generic.go:334] "Generic (PLEG): container finished" podID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerID="b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586" exitCode=0
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.512530 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gglkn"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.512536 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerDied","Data":"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"}
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.512613 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gglkn" event={"ID":"f18b946b-0145-41b9-a840-55b7f6c7d037","Type":"ContainerDied","Data":"9b460ead39bcfba173e8a1089056e38c332d1c607e43ec18f856dd6cf7674811"}
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.512643 5021 scope.go:117] "RemoveContainer" containerID="b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.540310 5021 scope.go:117] "RemoveContainer" containerID="d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.561655 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.569683 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gglkn"]
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.582285 5021 scope.go:117] "RemoveContainer" containerID="bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.601644 5021 scope.go:117] "RemoveContainer" containerID="b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"
Jan 21 16:11:58 crc kubenswrapper[5021]: E0121 16:11:58.602107 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586\": container with ID starting with b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586 not found: ID does not exist" containerID="b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.602141 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586"} err="failed to get container status \"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586\": rpc error: code = NotFound desc = could not find container \"b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586\": container with ID starting with b2f2dac24504435c2a2038f9b760afa1635ec1fecdc644d1a8b96ae716026586 not found: ID does not exist"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.602165 5021 scope.go:117] "RemoveContainer" containerID="d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"
Jan 21 16:11:58 crc kubenswrapper[5021]: E0121 16:11:58.602438 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958\": container with ID starting with d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958 not found: ID does not exist" containerID="d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.602464 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958"} err="failed to get container status \"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958\": rpc error: code = NotFound desc = could not find container \"d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958\": container with ID starting with d10ed972ce33ec161b812e174d72500005dadd3f6bf61b1675afa2f2a810f958 not found: ID does not exist"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.602476 5021 scope.go:117] "RemoveContainer" containerID="bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3"
Jan 21 16:11:58 crc kubenswrapper[5021]: E0121 16:11:58.603059 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3\": container with ID starting with bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3 not found: ID does not exist" containerID="bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3"
Jan 21 16:11:58 crc kubenswrapper[5021]: I0121 16:11:58.603082 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3"} err="failed to get container status \"bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3\": rpc error: code = NotFound desc = could not find container \"bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3\": container with ID starting with bfb38288673e1982121c859089567e1d520345a68790f555b2be265c855387b3 not found: ID does not exist"
up orphaned pod volumes dir" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" path="/var/lib/kubelet/pods/f18b946b-0145-41b9-a840-55b7f6c7d037/volumes" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.671785 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:13 crc kubenswrapper[5021]: E0121 16:13:13.673525 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="registry-server" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.673552 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="registry-server" Jan 21 16:13:13 crc kubenswrapper[5021]: E0121 16:13:13.673572 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="extract-content" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.673585 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="extract-content" Jan 21 16:13:13 crc kubenswrapper[5021]: E0121 16:13:13.673609 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="extract-utilities" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.673620 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="extract-utilities" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.673850 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="f18b946b-0145-41b9-a840-55b7f6c7d037" containerName="registry-server" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.675545 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.697829 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.790471 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.790844 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.791096 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l8mh\" (UniqueName: \"kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.892108 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.892511 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.892649 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l8mh\" (UniqueName: \"kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.892717 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.893043 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:13 crc kubenswrapper[5021]: I0121 16:13:13.914038 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5l8mh\" (UniqueName: \"kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh\") pod \"certified-operators-bjdt5\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:14 crc kubenswrapper[5021]: I0121 16:13:14.000440 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:14 crc kubenswrapper[5021]: I0121 16:13:14.418523 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.025696 5021 generic.go:334] "Generic (PLEG): container finished" podID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerID="4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975" exitCode=0 Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.025797 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerDied","Data":"4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975"} Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.025852 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerStarted","Data":"86380a43d076115ba06f4f951ab0e5fddf47cf32386252e282ad4c552a87d964"} Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.467248 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.469182 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.472779 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.617431 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.617593 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.617687 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8zlq\" (UniqueName: \"kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.719036 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8zlq\" (UniqueName: \"kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.719103 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.719183 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.719815 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.719936 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.740892 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m8zlq\" (UniqueName: \"kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq\") pod \"community-operators-x8zrp\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:15 crc kubenswrapper[5021]: I0121 16:13:15.785708 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:16 crc kubenswrapper[5021]: I0121 16:13:16.301492 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:17 crc kubenswrapper[5021]: I0121 16:13:17.039163 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerID="1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642" exitCode=0 Jan 21 16:13:17 crc kubenswrapper[5021]: I0121 16:13:17.039290 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerDied","Data":"1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642"} Jan 21 16:13:17 crc kubenswrapper[5021]: I0121 16:13:17.039368 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerStarted","Data":"9e1c966dd0522451dc8653cd5d89fe73ab86ed52c3d2cc618e583053bae7fc05"} Jan 21 16:13:17 crc kubenswrapper[5021]: I0121 16:13:17.041109 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerStarted","Data":"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0"} Jan 21 16:13:18 crc kubenswrapper[5021]: I0121 16:13:18.048969 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerID="79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25" exitCode=0 Jan 21 16:13:18 crc kubenswrapper[5021]: I0121 16:13:18.049023 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerDied","Data":"79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25"} Jan 21 16:13:18 crc kubenswrapper[5021]: I0121 16:13:18.051645 5021 generic.go:334] "Generic (PLEG): container finished" podID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerID="39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0" exitCode=0 Jan 21 16:13:18 crc kubenswrapper[5021]: I0121 16:13:18.051676 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerDied","Data":"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0"} Jan 21 16:13:19 crc kubenswrapper[5021]: I0121 16:13:19.058753 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerStarted","Data":"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b"} Jan 21 16:13:19 crc kubenswrapper[5021]: I0121 16:13:19.060795 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" 
event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerStarted","Data":"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1"} Jan 21 16:13:19 crc kubenswrapper[5021]: I0121 16:13:19.079979 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x8zrp" podStartSLOduration=2.5716680370000002 podStartE2EDuration="4.079959373s" podCreationTimestamp="2026-01-21 16:13:15 +0000 UTC" firstStartedPulling="2026-01-21 16:13:17.041256633 +0000 UTC m=+2938.576370522" lastFinishedPulling="2026-01-21 16:13:18.549547969 +0000 UTC m=+2940.084661858" observedRunningTime="2026-01-21 16:13:19.074517799 +0000 UTC m=+2940.609631698" watchObservedRunningTime="2026-01-21 16:13:19.079959373 +0000 UTC m=+2940.615073262" Jan 21 16:13:19 crc kubenswrapper[5021]: I0121 16:13:19.094684 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bjdt5" podStartSLOduration=2.540313551 podStartE2EDuration="6.094662052s" podCreationTimestamp="2026-01-21 16:13:13 +0000 UTC" firstStartedPulling="2026-01-21 16:13:15.027483383 +0000 UTC m=+2936.562597272" lastFinishedPulling="2026-01-21 16:13:18.581831894 +0000 UTC m=+2940.116945773" observedRunningTime="2026-01-21 16:13:19.091686453 +0000 UTC m=+2940.626800362" watchObservedRunningTime="2026-01-21 16:13:19.094662052 +0000 UTC m=+2940.629775961" Jan 21 16:13:24 crc kubenswrapper[5021]: I0121 16:13:24.002109 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:24 crc kubenswrapper[5021]: I0121 16:13:24.002403 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:24 crc kubenswrapper[5021]: I0121 16:13:24.044001 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:24 crc kubenswrapper[5021]: I0121 16:13:24.128893 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:24 crc kubenswrapper[5021]: I0121 16:13:24.280721 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:25 crc kubenswrapper[5021]: I0121 16:13:25.786276 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:25 crc kubenswrapper[5021]: I0121 16:13:25.786352 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:25 crc kubenswrapper[5021]: I0121 16:13:25.823866 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:26 crc kubenswrapper[5021]: I0121 16:13:26.105945 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bjdt5" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="registry-server" containerID="cri-o://de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1" gracePeriod=2 Jan 21 16:13:26 crc kubenswrapper[5021]: I0121 16:13:26.147014 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:26 crc kubenswrapper[5021]: I0121 
16:13:26.681650 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.008963 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.115661 5021 generic.go:334] "Generic (PLEG): container finished" podID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerID="de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1" exitCode=0 Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.115739 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bjdt5" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.115768 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerDied","Data":"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1"} Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.115831 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bjdt5" event={"ID":"6632b1ab-a199-4bda-bb26-c4cb71780bb0","Type":"ContainerDied","Data":"86380a43d076115ba06f4f951ab0e5fddf47cf32386252e282ad4c552a87d964"} Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.115856 5021 scope.go:117] "RemoveContainer" containerID="de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.133105 5021 scope.go:117] "RemoveContainer" containerID="39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.156431 5021 scope.go:117] "RemoveContainer" containerID="4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.175358 5021 scope.go:117] "RemoveContainer" containerID="de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1" Jan 21 16:13:27 crc kubenswrapper[5021]: E0121 16:13:27.175808 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1\": container with ID starting with de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1 not found: ID does not exist" containerID="de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.175838 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1"} err="failed to get container status \"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1\": rpc error: code = NotFound desc = could not find container \"de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1\": container with ID starting with de1331e654b562a64d95c45c7820bcbc587030867eede28553e3acdec3bd7ed1 not found: ID does not exist" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.175861 5021 scope.go:117] "RemoveContainer" containerID="39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0" Jan 21 16:13:27 crc kubenswrapper[5021]: E0121 16:13:27.176206 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0\": container with ID starting with 39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0 not found: ID does not exist" containerID="39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.176234 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0"} err="failed to get container status \"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0\": rpc error: code = NotFound desc = could not find container \"39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0\": container with ID starting with 39e0e8a6d9d880b31742a5e53c563b1d589b60d64267434cb274cab575439ab0 not found: ID does not exist" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.176249 5021 scope.go:117] "RemoveContainer" containerID="4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975" Jan 21 16:13:27 crc kubenswrapper[5021]: E0121 16:13:27.176555 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975\": container with ID starting with 4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975 not found: ID does not exist" containerID="4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.176613 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975"} err="failed to get container status \"4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975\": rpc error: code = NotFound desc = could not find container \"4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975\": container with ID starting with 4f087edba85b6e1c8bd72b13f4cca34018b6df25222129c111e7708248174975 not found: ID does not exist" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.188533 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content\") pod \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.188704 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5l8mh\" (UniqueName: \"kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh\") pod \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.188733 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities\") pod \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\" (UID: \"6632b1ab-a199-4bda-bb26-c4cb71780bb0\") " Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.189676 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities" (OuterVolumeSpecName: "utilities") pod "6632b1ab-a199-4bda-bb26-c4cb71780bb0" (UID: "6632b1ab-a199-4bda-bb26-c4cb71780bb0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.193926 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh" (OuterVolumeSpecName: "kube-api-access-5l8mh") pod "6632b1ab-a199-4bda-bb26-c4cb71780bb0" (UID: "6632b1ab-a199-4bda-bb26-c4cb71780bb0"). InnerVolumeSpecName "kube-api-access-5l8mh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.240528 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6632b1ab-a199-4bda-bb26-c4cb71780bb0" (UID: "6632b1ab-a199-4bda-bb26-c4cb71780bb0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.290562 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.290603 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5l8mh\" (UniqueName: \"kubernetes.io/projected/6632b1ab-a199-4bda-bb26-c4cb71780bb0-kube-api-access-5l8mh\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.290615 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6632b1ab-a199-4bda-bb26-c4cb71780bb0-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.443425 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:27 crc kubenswrapper[5021]: I0121 16:13:27.448249 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bjdt5"] Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.123480 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x8zrp" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="registry-server" containerID="cri-o://e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b" gracePeriod=2 Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.492547 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.607466 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content\") pod \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.607599 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8zlq\" (UniqueName: \"kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq\") pod \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.607624 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities\") pod \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\" (UID: \"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8\") " Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.608523 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities" (OuterVolumeSpecName: "utilities") pod "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" (UID: "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.611690 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq" (OuterVolumeSpecName: "kube-api-access-m8zlq") pod "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" (UID: "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8"). InnerVolumeSpecName "kube-api-access-m8zlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.658869 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" (UID: "4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.710295 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.710344 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8zlq\" (UniqueName: \"kubernetes.io/projected/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-kube-api-access-m8zlq\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.710362 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:13:28 crc kubenswrapper[5021]: I0121 16:13:28.748647 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" path="/var/lib/kubelet/pods/6632b1ab-a199-4bda-bb26-c4cb71780bb0/volumes" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.133348 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerID="e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b" exitCode=0 Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.133393 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerDied","Data":"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b"} Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.133411 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x8zrp" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.133422 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8zrp" event={"ID":"4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8","Type":"ContainerDied","Data":"9e1c966dd0522451dc8653cd5d89fe73ab86ed52c3d2cc618e583053bae7fc05"} Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.133443 5021 scope.go:117] "RemoveContainer" containerID="e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.155734 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.166836 5021 scope.go:117] "RemoveContainer" containerID="79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.182094 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x8zrp"] Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.202213 5021 scope.go:117] "RemoveContainer" containerID="1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.218954 5021 scope.go:117] "RemoveContainer" containerID="e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b" Jan 21 16:13:29 crc kubenswrapper[5021]: E0121 16:13:29.219412 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b\": container with ID starting with e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b not found: ID does not exist" containerID="e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.219450 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b"} err="failed to get container status \"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b\": rpc error: code = NotFound desc = could not find container \"e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b\": container with ID starting with e5631ebc63d40c45b91dec77309c3d21f98809ac40e58e233116ab191ae71e1b not found: ID does not exist" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.219475 5021 scope.go:117] "RemoveContainer" containerID="79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25" Jan 21 16:13:29 crc kubenswrapper[5021]: E0121 16:13:29.219981 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25\": container with ID starting with 79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25 not found: ID does not exist" containerID="79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.220036 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25"} err="failed to get container status \"79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25\": rpc error: code = NotFound desc = could not find 
container \"79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25\": container with ID starting with 79d0597fedd6bcee18ce68deb6b091e94baa7ba3185c7c60da9b95643688ba25 not found: ID does not exist" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.220051 5021 scope.go:117] "RemoveContainer" containerID="1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642" Jan 21 16:13:29 crc kubenswrapper[5021]: E0121 16:13:29.220429 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642\": container with ID starting with 1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642 not found: ID does not exist" containerID="1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642" Jan 21 16:13:29 crc kubenswrapper[5021]: I0121 16:13:29.220487 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642"} err="failed to get container status \"1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642\": rpc error: code = NotFound desc = could not find container \"1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642\": container with ID starting with 1be7b70e4dcc897310860d46d199f2e84f0f6b471648c1a2390c28043aa9f642 not found: ID does not exist" Jan 21 16:13:30 crc kubenswrapper[5021]: I0121 16:13:30.749666 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" path="/var/lib/kubelet/pods/4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8/volumes" Jan 21 16:13:42 crc kubenswrapper[5021]: I0121 16:13:42.357273 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:13:42 crc kubenswrapper[5021]: I0121 16:13:42.358064 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:14:12 crc kubenswrapper[5021]: I0121 16:14:12.357568 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:14:12 crc kubenswrapper[5021]: I0121 16:14:12.358250 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.357284 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 
16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.357860 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.357925 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.358754 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.358800 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" gracePeriod=600 Jan 21 16:14:42 crc kubenswrapper[5021]: E0121 16:14:42.479341 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.679277 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" exitCode=0 Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.679335 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867"} Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.679413 5021 scope.go:117] "RemoveContainer" containerID="cbc78d8873867ec620819f7a63bde2e1fd0fdabfa4bf3bc1ed623bc7db2de3b8" Jan 21 16:14:42 crc kubenswrapper[5021]: I0121 16:14:42.680224 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:14:42 crc kubenswrapper[5021]: E0121 16:14:42.680526 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:14:52 crc kubenswrapper[5021]: I0121 16:14:52.738725 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:14:52 crc 
kubenswrapper[5021]: E0121 16:14:52.739362 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.151473 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb"] Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152373 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="extract-utilities" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152391 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="extract-utilities" Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152416 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152424 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152448 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="extract-content" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152458 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="extract-content" Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152468 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="extract-utilities" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152475 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="extract-utilities" Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152488 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152528 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: E0121 16:15:00.152546 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="extract-content" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152552 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="extract-content" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152695 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e5ae2b2-f1d4-4cb3-81ec-0f3c75de15b8" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.152708 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="6632b1ab-a199-4bda-bb26-c4cb71780bb0" containerName="registry-server" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.153341 5021 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.158772 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.158790 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.161506 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb"] Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.294604 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.294743 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.294787 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d6ff\" (UniqueName: \"kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.396242 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.396301 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d6ff\" (UniqueName: \"kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.396338 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.397270 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.408712 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.414617 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d6ff\" (UniqueName: \"kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff\") pod \"collect-profiles-29483535-7gctb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.471450 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:00 crc kubenswrapper[5021]: I0121 16:15:00.945708 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb"] Jan 21 16:15:01 crc kubenswrapper[5021]: I0121 16:15:01.814087 5021 generic.go:334] "Generic (PLEG): container finished" podID="0427e77d-89b1-4caf-9b8d-8c5285bab4eb" containerID="6f195b716f9dfc8afaad5524ba92df98a80349401941691ec84e12b8bbd7e037" exitCode=0 Jan 21 16:15:01 crc kubenswrapper[5021]: I0121 16:15:01.814129 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" event={"ID":"0427e77d-89b1-4caf-9b8d-8c5285bab4eb","Type":"ContainerDied","Data":"6f195b716f9dfc8afaad5524ba92df98a80349401941691ec84e12b8bbd7e037"} Jan 21 16:15:01 crc kubenswrapper[5021]: I0121 16:15:01.814328 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" event={"ID":"0427e77d-89b1-4caf-9b8d-8c5285bab4eb","Type":"ContainerStarted","Data":"caf1b5a513c9b1632ae10ba4c36ea2e0955801160a9ae978a951e19b4698aed7"} Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.088508 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.143559 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume\") pod \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.143644 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume\") pod \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.143723 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d6ff\" (UniqueName: \"kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff\") pod \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\" (UID: \"0427e77d-89b1-4caf-9b8d-8c5285bab4eb\") " Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.144894 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume" (OuterVolumeSpecName: "config-volume") pod "0427e77d-89b1-4caf-9b8d-8c5285bab4eb" (UID: "0427e77d-89b1-4caf-9b8d-8c5285bab4eb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.149614 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff" (OuterVolumeSpecName: "kube-api-access-8d6ff") pod "0427e77d-89b1-4caf-9b8d-8c5285bab4eb" (UID: "0427e77d-89b1-4caf-9b8d-8c5285bab4eb"). InnerVolumeSpecName "kube-api-access-8d6ff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.149658 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0427e77d-89b1-4caf-9b8d-8c5285bab4eb" (UID: "0427e77d-89b1-4caf-9b8d-8c5285bab4eb"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.244796 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d6ff\" (UniqueName: \"kubernetes.io/projected/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-kube-api-access-8d6ff\") on node \"crc\" DevicePath \"\"" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.245144 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.245161 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0427e77d-89b1-4caf-9b8d-8c5285bab4eb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.738680 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:15:03 crc kubenswrapper[5021]: E0121 16:15:03.739354 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.834936 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" event={"ID":"0427e77d-89b1-4caf-9b8d-8c5285bab4eb","Type":"ContainerDied","Data":"caf1b5a513c9b1632ae10ba4c36ea2e0955801160a9ae978a951e19b4698aed7"} Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.834994 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caf1b5a513c9b1632ae10ba4c36ea2e0955801160a9ae978a951e19b4698aed7" Jan 21 16:15:03 crc kubenswrapper[5021]: I0121 16:15:03.835079 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb" Jan 21 16:15:04 crc kubenswrapper[5021]: I0121 16:15:04.160988 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g"] Jan 21 16:15:04 crc kubenswrapper[5021]: I0121 16:15:04.166528 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483490-dsk6g"] Jan 21 16:15:04 crc kubenswrapper[5021]: I0121 16:15:04.748630 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="522f1ec2-d915-4b24-8f6b-a6d31c807de9" path="/var/lib/kubelet/pods/522f1ec2-d915-4b24-8f6b-a6d31c807de9/volumes" Jan 21 16:15:16 crc kubenswrapper[5021]: I0121 16:15:16.738264 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:15:16 crc kubenswrapper[5021]: E0121 16:15:16.739120 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:15:29 crc kubenswrapper[5021]: I0121 16:15:29.737605 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:15:29 crc kubenswrapper[5021]: E0121 16:15:29.738319 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:15:41 crc kubenswrapper[5021]: I0121 16:15:41.738355 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:15:41 crc kubenswrapper[5021]: E0121 16:15:41.739155 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:15:49 crc kubenswrapper[5021]: I0121 16:15:49.097082 5021 scope.go:117] "RemoveContainer" containerID="6e08e23f3f16ee0f74bb339f1d9760136b37e8b260976ab6e3a305d88a5ba8a5" Jan 21 16:15:56 crc kubenswrapper[5021]: I0121 16:15:56.738297 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:15:56 crc kubenswrapper[5021]: E0121 16:15:56.739008 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:16:08 crc kubenswrapper[5021]: I0121 16:16:08.742194 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:16:08 crc kubenswrapper[5021]: E0121 16:16:08.743492 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:16:19 crc kubenswrapper[5021]: I0121 16:16:19.737599 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:16:19 crc kubenswrapper[5021]: E0121 16:16:19.739750 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:16:31 crc kubenswrapper[5021]: I0121 16:16:31.737703 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:16:31 crc kubenswrapper[5021]: E0121 16:16:31.738520 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:16:42 crc kubenswrapper[5021]: I0121 16:16:42.737593 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:16:42 crc kubenswrapper[5021]: E0121 16:16:42.739772 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:16:55 crc kubenswrapper[5021]: I0121 16:16:55.737819 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:16:55 crc kubenswrapper[5021]: E0121 16:16:55.738672 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:17:07 crc kubenswrapper[5021]: I0121 16:17:07.737677 5021 
scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:17:07 crc kubenswrapper[5021]: E0121 16:17:07.738565 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:17:18 crc kubenswrapper[5021]: I0121 16:17:18.741080 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:17:18 crc kubenswrapper[5021]: E0121 16:17:18.742761 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:17:33 crc kubenswrapper[5021]: I0121 16:17:33.737923 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:17:33 crc kubenswrapper[5021]: E0121 16:17:33.738645 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:17:46 crc kubenswrapper[5021]: I0121 16:17:46.738514 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:17:46 crc kubenswrapper[5021]: E0121 16:17:46.740985 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:18:00 crc kubenswrapper[5021]: I0121 16:18:00.737768 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:18:00 crc kubenswrapper[5021]: E0121 16:18:00.738605 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:18:12 crc kubenswrapper[5021]: I0121 16:18:12.737700 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:18:12 crc kubenswrapper[5021]: E0121 16:18:12.738399 5021 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:18:27 crc kubenswrapper[5021]: I0121 16:18:27.737947 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:18:27 crc kubenswrapper[5021]: E0121 16:18:27.738767 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:18:38 crc kubenswrapper[5021]: I0121 16:18:38.742681 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:18:38 crc kubenswrapper[5021]: E0121 16:18:38.743976 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:18:52 crc kubenswrapper[5021]: I0121 16:18:52.738102 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:18:52 crc kubenswrapper[5021]: E0121 16:18:52.738856 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:19:04 crc kubenswrapper[5021]: I0121 16:19:04.737650 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:19:04 crc kubenswrapper[5021]: E0121 16:19:04.738321 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:19:15 crc kubenswrapper[5021]: I0121 16:19:15.738264 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:19:15 crc kubenswrapper[5021]: E0121 16:19:15.740172 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:19:26 crc kubenswrapper[5021]: I0121 16:19:26.737925 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:19:26 crc kubenswrapper[5021]: E0121 16:19:26.738659 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:19:40 crc kubenswrapper[5021]: I0121 16:19:40.738269 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:19:40 crc kubenswrapper[5021]: E0121 16:19:40.739167 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:19:52 crc kubenswrapper[5021]: I0121 16:19:52.738371 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:19:53 crc kubenswrapper[5021]: I0121 16:19:53.839379 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"42655b6e9060bc485d886ddd971a3d61170627eb976c2b26db3bc35bf98546ed"} Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.227973 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:35 crc kubenswrapper[5021]: E0121 16:20:35.229083 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0427e77d-89b1-4caf-9b8d-8c5285bab4eb" containerName="collect-profiles" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.229100 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="0427e77d-89b1-4caf-9b8d-8c5285bab4eb" containerName="collect-profiles" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.229262 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="0427e77d-89b1-4caf-9b8d-8c5285bab4eb" containerName="collect-profiles" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.230349 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.231803 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.331484 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.331605 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj4fl\" (UniqueName: \"kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.331662 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.432639 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj4fl\" (UniqueName: \"kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.432726 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.432775 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.433218 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.433264 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.452498 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bj4fl\" (UniqueName: \"kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl\") pod \"redhat-operators-flttj\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.556872 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:35 crc kubenswrapper[5021]: I0121 16:20:35.994196 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:36 crc kubenswrapper[5021]: I0121 16:20:36.166327 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerStarted","Data":"89b35fb77c763a343500c5056365d5781eb18fb4ae3c1b8f26ce4ad6b61fff06"} Jan 21 16:20:36 crc kubenswrapper[5021]: I0121 16:20:36.166369 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerStarted","Data":"0d2004593aae126608e7324631cf546a14cee63525c0e10f35ae77b124349915"} Jan 21 16:20:37 crc kubenswrapper[5021]: I0121 16:20:37.175240 5021 generic.go:334] "Generic (PLEG): container finished" podID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerID="89b35fb77c763a343500c5056365d5781eb18fb4ae3c1b8f26ce4ad6b61fff06" exitCode=0 Jan 21 16:20:37 crc kubenswrapper[5021]: I0121 16:20:37.175351 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerDied","Data":"89b35fb77c763a343500c5056365d5781eb18fb4ae3c1b8f26ce4ad6b61fff06"} Jan 21 16:20:37 crc kubenswrapper[5021]: I0121 16:20:37.178237 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 16:20:39 crc kubenswrapper[5021]: I0121 16:20:39.190136 5021 generic.go:334] "Generic (PLEG): container finished" podID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerID="f6068fb3b4a161cc920545982491a171c4af0cdd4cf668b3037f1f5f8f42b256" exitCode=0 Jan 21 16:20:39 crc kubenswrapper[5021]: I0121 16:20:39.190224 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerDied","Data":"f6068fb3b4a161cc920545982491a171c4af0cdd4cf668b3037f1f5f8f42b256"} Jan 21 16:20:40 crc kubenswrapper[5021]: I0121 16:20:40.198252 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerStarted","Data":"98907a8e354847129cf0e88bb000e9cad43c319cd8f1eb1978d126f587ad6523"} Jan 21 16:20:40 crc kubenswrapper[5021]: I0121 16:20:40.215538 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-flttj" podStartSLOduration=2.553522205 podStartE2EDuration="5.215522219s" podCreationTimestamp="2026-01-21 16:20:35 +0000 UTC" firstStartedPulling="2026-01-21 16:20:37.17789795 +0000 UTC m=+3378.713011839" lastFinishedPulling="2026-01-21 16:20:39.839897964 +0000 UTC m=+3381.375011853" observedRunningTime="2026-01-21 16:20:40.212706083 +0000 UTC m=+3381.747819972" watchObservedRunningTime="2026-01-21 16:20:40.215522219 +0000 UTC m=+3381.750636108" Jan 21 16:20:45 crc 
kubenswrapper[5021]: I0121 16:20:45.557179 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:45 crc kubenswrapper[5021]: I0121 16:20:45.557818 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:45 crc kubenswrapper[5021]: I0121 16:20:45.597241 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:46 crc kubenswrapper[5021]: I0121 16:20:46.276923 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:46 crc kubenswrapper[5021]: I0121 16:20:46.324041 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:48 crc kubenswrapper[5021]: I0121 16:20:48.247597 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-flttj" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server" containerID="cri-o://98907a8e354847129cf0e88bb000e9cad43c319cd8f1eb1978d126f587ad6523" gracePeriod=2 Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.379840 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.388455 5021 generic.go:334] "Generic (PLEG): container finished" podID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerID="98907a8e354847129cf0e88bb000e9cad43c319cd8f1eb1978d126f587ad6523" exitCode=0 Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.388496 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerDied","Data":"98907a8e354847129cf0e88bb000e9cad43c319cd8f1eb1978d126f587ad6523"} Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.388529 5021 scope.go:117] "RemoveContainer" containerID="98907a8e354847129cf0e88bb000e9cad43c319cd8f1eb1978d126f587ad6523" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.408646 5021 scope.go:117] "RemoveContainer" containerID="f6068fb3b4a161cc920545982491a171c4af0cdd4cf668b3037f1f5f8f42b256" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.428819 5021 scope.go:117] "RemoveContainer" containerID="89b35fb77c763a343500c5056365d5781eb18fb4ae3c1b8f26ce4ad6b61fff06" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.533016 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities\") pod \"d8398097-63b1-4f85-a371-b6aabe27eda7\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.533129 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content\") pod \"d8398097-63b1-4f85-a371-b6aabe27eda7\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.533228 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bj4fl\" (UniqueName: \"kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl\") pod 
\"d8398097-63b1-4f85-a371-b6aabe27eda7\" (UID: \"d8398097-63b1-4f85-a371-b6aabe27eda7\") " Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.535589 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities" (OuterVolumeSpecName: "utilities") pod "d8398097-63b1-4f85-a371-b6aabe27eda7" (UID: "d8398097-63b1-4f85-a371-b6aabe27eda7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.538171 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl" (OuterVolumeSpecName: "kube-api-access-bj4fl") pod "d8398097-63b1-4f85-a371-b6aabe27eda7" (UID: "d8398097-63b1-4f85-a371-b6aabe27eda7"). InnerVolumeSpecName "kube-api-access-bj4fl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.635026 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bj4fl\" (UniqueName: \"kubernetes.io/projected/d8398097-63b1-4f85-a371-b6aabe27eda7-kube-api-access-bj4fl\") on node \"crc\" DevicePath \"\"" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.635061 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.669676 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8398097-63b1-4f85-a371-b6aabe27eda7" (UID: "d8398097-63b1-4f85-a371-b6aabe27eda7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:20:53 crc kubenswrapper[5021]: I0121 16:20:53.736523 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8398097-63b1-4f85-a371-b6aabe27eda7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:20:54 crc kubenswrapper[5021]: I0121 16:20:54.395981 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flttj" event={"ID":"d8398097-63b1-4f85-a371-b6aabe27eda7","Type":"ContainerDied","Data":"0d2004593aae126608e7324631cf546a14cee63525c0e10f35ae77b124349915"} Jan 21 16:20:54 crc kubenswrapper[5021]: I0121 16:20:54.396061 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-flttj" Jan 21 16:20:54 crc kubenswrapper[5021]: I0121 16:20:54.429820 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:54 crc kubenswrapper[5021]: I0121 16:20:54.434800 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-flttj"] Jan 21 16:20:54 crc kubenswrapper[5021]: I0121 16:20:54.747272 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" path="/var/lib/kubelet/pods/d8398097-63b1-4f85-a371-b6aabe27eda7/volumes" Jan 21 16:22:12 crc kubenswrapper[5021]: I0121 16:22:12.357055 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:22:12 crc kubenswrapper[5021]: I0121 16:22:12.357702 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.416702 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"] Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417677 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417690 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server" Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417702 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-utilities" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417708 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-utilities" Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417719 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-content" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417725 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-content" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417876 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server" Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.418954 5021 util.go:30] "No sandbox for pod can be found. 
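
Note on the "Observed pod startup duration" entry above: the two durations differ by exactly the image-pull window. podStartE2EDuration (5.215522219s) minus the pull interval (lastFinishedPulling - firstStartedPulling = 2.662000014s) gives podStartSLOduration (2.553522205s), i.e. the SLO figure excludes time spent pulling images. A minimal Go sketch of that arithmetic, reusing the timestamps from the entry (the parse layout is Go's default time.Time formatting with the "m=+..." monotonic suffix dropped; this is an illustration, not kubelet code):

    package main

    import (
        "fmt"
        "time"
    )

    // Layout matching timestamps such as "2026-01-21 16:20:37.17789795 +0000 UTC"
    // in the entry above (Go's default time.Time formatting).
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func main() {
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        firstStartedPulling := parse("2026-01-21 16:20:37.17789795 +0000 UTC")
        lastFinishedPulling := parse("2026-01-21 16:20:39.839897964 +0000 UTC")
        e2e, _ := time.ParseDuration("5.215522219s") // podStartE2EDuration

        // SLO duration = end-to-end startup minus the image-pull window;
        // reproduces podStartSLOduration=2.553522205 from the entry above.
        slo := e2e - lastFinishedPulling.Sub(firstStartedPulling)
        fmt.Println(slo) // 2.553522205s
    }

The same relation appears to hold (to within rounding) for the redhat-marketplace-tgnvp, certified-operators-f875x and community-operators-bh66m startup-duration entries later in this log.
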
Jan 21 16:22:12 crc kubenswrapper[5021]: I0121 16:22:12.357055 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 16:22:12 crc kubenswrapper[5021]: I0121 16:22:12.357702 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.416702 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417677 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417690 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server"
Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417702 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-utilities"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417708 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-utilities"
Jan 21 16:22:21 crc kubenswrapper[5021]: E0121 16:22:21.417719 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-content"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417725 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="extract-content"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.417876 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8398097-63b1-4f85-a371-b6aabe27eda7" containerName="registry-server"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.418954 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.424560 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.541753 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.541833 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.541870 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs984\" (UniqueName: \"kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.643879 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.644000 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs984\" (UniqueName: \"kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.644105 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.644996 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.645094 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.665700 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs984\" (UniqueName: \"kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984\") pod \"redhat-marketplace-tgnvp\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") " pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.744134 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:21 crc kubenswrapper[5021]: I0121 16:22:21.985656 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:22 crc kubenswrapper[5021]: I0121 16:22:22.993212 5021 generic.go:334] "Generic (PLEG): container finished" podID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerID="2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab" exitCode=0
Jan 21 16:22:22 crc kubenswrapper[5021]: I0121 16:22:22.993529 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerDied","Data":"2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab"}
Jan 21 16:22:22 crc kubenswrapper[5021]: I0121 16:22:22.993557 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerStarted","Data":"9df6be7682f61012d384e12b868ea9f54a07996ddfc40d19cfe7740c08b80383"}
Jan 21 16:22:24 crc kubenswrapper[5021]: I0121 16:22:24.002287 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerStarted","Data":"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"}
Jan 21 16:22:25 crc kubenswrapper[5021]: I0121 16:22:25.010471 5021 generic.go:334] "Generic (PLEG): container finished" podID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerID="7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7" exitCode=0
Jan 21 16:22:25 crc kubenswrapper[5021]: I0121 16:22:25.010558 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerDied","Data":"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"}
Jan 21 16:22:27 crc kubenswrapper[5021]: I0121 16:22:27.026894 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerStarted","Data":"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"}
Jan 21 16:22:27 crc kubenswrapper[5021]: I0121 16:22:27.049941 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tgnvp" podStartSLOduration=3.098424815 podStartE2EDuration="6.049899546s" podCreationTimestamp="2026-01-21 16:22:21 +0000 UTC" firstStartedPulling="2026-01-21 16:22:22.995519764 +0000 UTC m=+3484.530633653" lastFinishedPulling="2026-01-21 16:22:25.946994495 +0000 UTC m=+3487.482108384" observedRunningTime="2026-01-21 16:22:27.048103997 +0000 UTC m=+3488.583217896" watchObservedRunningTime="2026-01-21 16:22:27.049899546 +0000 UTC m=+3488.585013435"
Jan 21 16:22:31 crc kubenswrapper[5021]: I0121 16:22:31.744814 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:31 crc kubenswrapper[5021]: I0121 16:22:31.746000 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:31 crc kubenswrapper[5021]: I0121 16:22:31.790526 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:32 crc kubenswrapper[5021]: I0121 16:22:32.100766 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:32 crc kubenswrapper[5021]: I0121 16:22:32.151081 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.074896 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tgnvp" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server" containerID="cri-o://d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92" gracePeriod=2
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.638489 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.836325 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs984\" (UniqueName: \"kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984\") pod \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") "
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.836393 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities\") pod \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") "
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.836454 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content\") pod \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\" (UID: \"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed\") "
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.837594 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities" (OuterVolumeSpecName: "utilities") pod "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" (UID: "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.842360 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984" (OuterVolumeSpecName: "kube-api-access-bs984") pod "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" (UID: "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed"). InnerVolumeSpecName "kube-api-access-bs984". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.861244 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" (UID: "9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.938942 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs984\" (UniqueName: \"kubernetes.io/projected/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-kube-api-access-bs984\") on node \"crc\" DevicePath \"\""
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.938978 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:22:34 crc kubenswrapper[5021]: I0121 16:22:34.939011 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.085149 5021 generic.go:334] "Generic (PLEG): container finished" podID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerID="d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92" exitCode=0
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.086633 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerDied","Data":"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"}
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.086782 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tgnvp" event={"ID":"9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed","Type":"ContainerDied","Data":"9df6be7682f61012d384e12b868ea9f54a07996ddfc40d19cfe7740c08b80383"}
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.086853 5021 scope.go:117] "RemoveContainer" containerID="d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.087089 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tgnvp"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.115344 5021 scope.go:117] "RemoveContainer" containerID="7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.125621 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.136875 5021 scope.go:117] "RemoveContainer" containerID="2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.139128 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tgnvp"]
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.166457 5021 scope.go:117] "RemoveContainer" containerID="d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"
Jan 21 16:22:35 crc kubenswrapper[5021]: E0121 16:22:35.166961 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92\": container with ID starting with d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92 not found: ID does not exist" containerID="d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.167027 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92"} err="failed to get container status \"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92\": rpc error: code = NotFound desc = could not find container \"d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92\": container with ID starting with d1c7218087395346d6887789974bc83c29ca4764f2f6bd8a35c0b7687435de92 not found: ID does not exist"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.167079 5021 scope.go:117] "RemoveContainer" containerID="7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"
Jan 21 16:22:35 crc kubenswrapper[5021]: E0121 16:22:35.167429 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7\": container with ID starting with 7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7 not found: ID does not exist" containerID="7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.167518 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7"} err="failed to get container status \"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7\": rpc error: code = NotFound desc = could not find container \"7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7\": container with ID starting with 7a7e2244d1fef3d23c6246187fa212fcce0a32309eaaf25b96e514db9f7481c7 not found: ID does not exist"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.167594 5021 scope.go:117] "RemoveContainer" containerID="2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab"
Jan 21 16:22:35 crc kubenswrapper[5021]: E0121 16:22:35.167856 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab\": container with ID starting with 2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab not found: ID does not exist" containerID="2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab"
Jan 21 16:22:35 crc kubenswrapper[5021]: I0121 16:22:35.167885 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab"} err="failed to get container status \"2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab\": rpc error: code = NotFound desc = could not find container \"2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab\": container with ID starting with 2f4fa3054cec1f6bf2e147d06bbaea7fd00458817d79d391d9f49eb722f282ab not found: ID does not exist"
Jan 21 16:22:36 crc kubenswrapper[5021]: I0121 16:22:36.748677 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" path="/var/lib/kubelet/pods/9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed/volumes"
podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://42655b6e9060bc485d886ddd971a3d61170627eb976c2b26db3bc35bf98546ed" gracePeriod=600 Jan 21 16:23:12 crc kubenswrapper[5021]: I0121 16:23:12.598469 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="42655b6e9060bc485d886ddd971a3d61170627eb976c2b26db3bc35bf98546ed" exitCode=0 Jan 21 16:23:12 crc kubenswrapper[5021]: I0121 16:23:12.598534 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"42655b6e9060bc485d886ddd971a3d61170627eb976c2b26db3bc35bf98546ed"} Jan 21 16:23:12 crc kubenswrapper[5021]: I0121 16:23:12.598849 5021 scope.go:117] "RemoveContainer" containerID="2a5bf6c88d4b8d2708dda3357b66e3937028a12d3565d50243ef6118f5036867" Jan 21 16:23:13 crc kubenswrapper[5021]: I0121 16:23:13.609506 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"} Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.002900 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bh66m"] Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003811 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-content" Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003824 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-content" Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003845 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-utilities" Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003851 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-utilities" Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003866 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server" Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003872 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server" Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.004109 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server" Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.005157 5021 util.go:30] "No sandbox for pod can be found. 
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.002900 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bh66m"]
Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003811 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-content"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003824 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-content"
Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003845 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-utilities"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003851 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="extract-utilities"
Jan 21 16:23:39 crc kubenswrapper[5021]: E0121 16:23:39.003866 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.003872 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.004109 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea7cc5f-0f51-493c-9f11-3007fa7ea5ed" containerName="registry-server"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.005157 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.009838 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bh66m"]
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.089478 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-catalog-content\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.089536 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-utilities\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.089708 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvb89\" (UniqueName: \"kubernetes.io/projected/5a648964-a42b-4f00-9c80-f3dbdf071df9-kube-api-access-wvb89\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.190514 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-catalog-content\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.190571 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-utilities\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.190639 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvb89\" (UniqueName: \"kubernetes.io/projected/5a648964-a42b-4f00-9c80-f3dbdf071df9-kube-api-access-wvb89\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.191033 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-catalog-content\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.191075 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-utilities\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.212839 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvb89\" (UniqueName: \"kubernetes.io/projected/5a648964-a42b-4f00-9c80-f3dbdf071df9-kube-api-access-wvb89\") pod \"community-operators-bh66m\" (UID: \"5a648964-a42b-4f00-9c80-f3dbdf071df9\") " pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.341348 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:39 crc kubenswrapper[5021]: I0121 16:23:39.822557 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bh66m"]
Jan 21 16:23:40 crc kubenswrapper[5021]: I0121 16:23:40.822876 5021 generic.go:334] "Generic (PLEG): container finished" podID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerID="34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142" exitCode=0
Jan 21 16:23:40 crc kubenswrapper[5021]: I0121 16:23:40.822973 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerDied","Data":"34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142"}
Jan 21 16:23:40 crc kubenswrapper[5021]: I0121 16:23:40.823198 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerStarted","Data":"cf651c91b7df5edc3e8652a07851a6b4c24ce5ef86bbb62149642b46a341f37a"}
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.774575 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.776254 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.784819 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.960710 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7vzx\" (UniqueName: \"kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.960760 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:43 crc kubenswrapper[5021]: I0121 16:23:43.960798 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.062524 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.062686 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7vzx\" (UniqueName: \"kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.062715 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.063156 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.063252 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.086212 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7vzx\" (UniqueName: \"kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx\") pod \"certified-operators-f875x\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") " pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.093275 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.599131 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.853664 5021 generic.go:334] "Generic (PLEG): container finished" podID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerID="7e181b77f1ab3cea7a71c32a876ec74a5fd4113a5f0b1a8503eae06f41a557a1" exitCode=0
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.853723 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerDied","Data":"7e181b77f1ab3cea7a71c32a876ec74a5fd4113a5f0b1a8503eae06f41a557a1"}
Jan 21 16:23:44 crc kubenswrapper[5021]: I0121 16:23:44.853756 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerStarted","Data":"115d23d7d112ce1f25c3ef7cdeaf9d63383ada4340d1a760ae645157cb892b40"}
Jan 21 16:23:45 crc kubenswrapper[5021]: I0121 16:23:45.863571 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerStarted","Data":"7e278a9ecf9b3ac04ebc6f1cc062c1f860756d84792874d89141849988385690"}
Jan 21 16:23:45 crc kubenswrapper[5021]: I0121 16:23:45.876049 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerStarted","Data":"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803"}
Jan 21 16:23:46 crc kubenswrapper[5021]: I0121 16:23:46.896645 5021 generic.go:334] "Generic (PLEG): container finished" podID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerID="7e278a9ecf9b3ac04ebc6f1cc062c1f860756d84792874d89141849988385690" exitCode=0
Jan 21 16:23:46 crc kubenswrapper[5021]: I0121 16:23:46.897397 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerDied","Data":"7e278a9ecf9b3ac04ebc6f1cc062c1f860756d84792874d89141849988385690"}
Jan 21 16:23:46 crc kubenswrapper[5021]: I0121 16:23:46.902398 5021 generic.go:334] "Generic (PLEG): container finished" podID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerID="fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803" exitCode=0
Jan 21 16:23:46 crc kubenswrapper[5021]: I0121 16:23:46.902815 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerDied","Data":"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803"}
Jan 21 16:23:47 crc kubenswrapper[5021]: I0121 16:23:47.910784 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerStarted","Data":"82bdadc387d2d6b66dc8eb81581c465d117914facc0a98b0691daf30ea36ceb3"}
Jan 21 16:23:47 crc kubenswrapper[5021]: I0121 16:23:47.917209 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerStarted","Data":"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc"}
Jan 21 16:23:47 crc kubenswrapper[5021]: I0121 16:23:47.935583 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f875x" podStartSLOduration=2.139270082 podStartE2EDuration="4.93556288s" podCreationTimestamp="2026-01-21 16:23:43 +0000 UTC" firstStartedPulling="2026-01-21 16:23:44.855299303 +0000 UTC m=+3566.390413192" lastFinishedPulling="2026-01-21 16:23:47.651592101 +0000 UTC m=+3569.186705990" observedRunningTime="2026-01-21 16:23:47.929233671 +0000 UTC m=+3569.464347580" watchObservedRunningTime="2026-01-21 16:23:47.93556288 +0000 UTC m=+3569.470676769"
Jan 21 16:23:47 crc kubenswrapper[5021]: I0121 16:23:47.948153 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bh66m" podStartSLOduration=3.187647445 podStartE2EDuration="9.948130238s" podCreationTimestamp="2026-01-21 16:23:38 +0000 UTC" firstStartedPulling="2026-01-21 16:23:40.825208045 +0000 UTC m=+3562.360321934" lastFinishedPulling="2026-01-21 16:23:47.585690848 +0000 UTC m=+3569.120804727" observedRunningTime="2026-01-21 16:23:47.945514648 +0000 UTC m=+3569.480628547" watchObservedRunningTime="2026-01-21 16:23:47.948130238 +0000 UTC m=+3569.483244137"
Jan 21 16:23:49 crc kubenswrapper[5021]: I0121 16:23:49.342076 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:49 crc kubenswrapper[5021]: I0121 16:23:49.342137 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:50 crc kubenswrapper[5021]: I0121 16:23:50.386155 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bh66m" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="registry-server" probeResult="failure" output=<
Jan 21 16:23:50 crc kubenswrapper[5021]: 	timeout: failed to connect service ":50051" within 1s
Jan 21 16:23:50 crc kubenswrapper[5021]: >
Jan 21 16:23:54 crc kubenswrapper[5021]: I0121 16:23:54.094334 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:54 crc kubenswrapper[5021]: I0121 16:23:54.094717 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:54 crc kubenswrapper[5021]: I0121 16:23:54.136825 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:55 crc kubenswrapper[5021]: I0121 16:23:55.013503 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:55 crc kubenswrapper[5021]: I0121 16:23:55.369416 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:23:56 crc kubenswrapper[5021]: I0121 16:23:56.981985 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f875x" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="registry-server" containerID="cri-o://82bdadc387d2d6b66dc8eb81581c465d117914facc0a98b0691daf30ea36ceb3" gracePeriod=2
Jan 21 16:23:58 crc kubenswrapper[5021]: I0121 16:23:58.996361 5021 generic.go:334] "Generic (PLEG): container finished" podID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerID="82bdadc387d2d6b66dc8eb81581c465d117914facc0a98b0691daf30ea36ceb3" exitCode=0
Jan 21 16:23:58 crc kubenswrapper[5021]: I0121 16:23:58.996387 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerDied","Data":"82bdadc387d2d6b66dc8eb81581c465d117914facc0a98b0691daf30ea36ceb3"}
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.275659 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.379243 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.381423 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities\") pod \"bb81582b-4eb1-498c-a458-41c939b1eacd\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") "
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.381564 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content\") pod \"bb81582b-4eb1-498c-a458-41c939b1eacd\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") "
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.381620 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7vzx\" (UniqueName: \"kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx\") pod \"bb81582b-4eb1-498c-a458-41c939b1eacd\" (UID: \"bb81582b-4eb1-498c-a458-41c939b1eacd\") "
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.382463 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities" (OuterVolumeSpecName: "utilities") pod "bb81582b-4eb1-498c-a458-41c939b1eacd" (UID: "bb81582b-4eb1-498c-a458-41c939b1eacd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.386934 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx" (OuterVolumeSpecName: "kube-api-access-g7vzx") pod "bb81582b-4eb1-498c-a458-41c939b1eacd" (UID: "bb81582b-4eb1-498c-a458-41c939b1eacd"). InnerVolumeSpecName "kube-api-access-g7vzx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.421340 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bh66m"
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.442482 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb81582b-4eb1-498c-a458-41c939b1eacd" (UID: "bb81582b-4eb1-498c-a458-41c939b1eacd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.483681 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.483733 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb81582b-4eb1-498c-a458-41c939b1eacd-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.483750 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7vzx\" (UniqueName: \"kubernetes.io/projected/bb81582b-4eb1-498c-a458-41c939b1eacd-kube-api-access-g7vzx\") on node \"crc\" DevicePath \"\""
Jan 21 16:23:59 crc kubenswrapper[5021]: I0121 16:23:59.966498 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bh66m"]
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.011062 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f875x" event={"ID":"bb81582b-4eb1-498c-a458-41c939b1eacd","Type":"ContainerDied","Data":"115d23d7d112ce1f25c3ef7cdeaf9d63383ada4340d1a760ae645157cb892b40"}
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.011108 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f875x"
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.011124 5021 scope.go:117] "RemoveContainer" containerID="82bdadc387d2d6b66dc8eb81581c465d117914facc0a98b0691daf30ea36ceb3"
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.030617 5021 scope.go:117] "RemoveContainer" containerID="7e278a9ecf9b3ac04ebc6f1cc062c1f860756d84792874d89141849988385690"
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.047557 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.055345 5021 scope.go:117] "RemoveContainer" containerID="7e181b77f1ab3cea7a71c32a876ec74a5fd4113a5f0b1a8503eae06f41a557a1"
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.057195 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f875x"]
Jan 21 16:24:00 crc kubenswrapper[5021]: I0121 16:24:00.751882 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" path="/var/lib/kubelet/pods/bb81582b-4eb1-498c-a458-41c939b1eacd/volumes"
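
All of the "Cleaned up orphaned pod volumes dir" entries in this log point at the same on-disk layout: a pod's volumes live under /var/lib/kubelet/pods/<podUID>/volumes, with one subdirectory per escaped plugin name and then one per volume. A trivial Go sketch of those paths (an illustration, not kubelet code; the '~' escaping of plugin names is the standard kubelet convention rather than something visible in this log):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // podVolumesDir is the directory reported by the "Cleaned up orphaned pod
    // volumes dir" entries above.
    func podVolumesDir(podUID string) string {
        return filepath.Join("/var/lib/kubelet/pods", podUID, "volumes")
    }

    func main() {
        uid := "bb81582b-4eb1-498c-a458-41c939b1eacd"
        // Matches path="/var/lib/kubelet/pods/<uid>/volumes" in the entry above.
        fmt.Println(podVolumesDir(uid))
        // Per-plugin subdirectory for the emptyDir volume "catalog-content".
        fmt.Println(filepath.Join(podVolumesDir(uid), "kubernetes.io~empty-dir", "catalog-content"))
    }
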
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:24:01 crc kubenswrapper[5021]: I0121 16:24:01.521761 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a648964-a42b-4f00-9c80-f3dbdf071df9-kube-api-access-wvb89" (OuterVolumeSpecName: "kube-api-access-wvb89") pod "5a648964-a42b-4f00-9c80-f3dbdf071df9" (UID: "5a648964-a42b-4f00-9c80-f3dbdf071df9"). InnerVolumeSpecName "kube-api-access-wvb89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:24:01 crc kubenswrapper[5021]: I0121 16:24:01.577576 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a648964-a42b-4f00-9c80-f3dbdf071df9" (UID: "5a648964-a42b-4f00-9c80-f3dbdf071df9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:24:01 crc kubenswrapper[5021]: I0121 16:24:01.613089 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvb89\" (UniqueName: \"kubernetes.io/projected/5a648964-a42b-4f00-9c80-f3dbdf071df9-kube-api-access-wvb89\") on node \"crc\" DevicePath \"\"" Jan 21 16:24:01 crc kubenswrapper[5021]: I0121 16:24:01.613133 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:24:01 crc kubenswrapper[5021]: I0121 16:24:01.613142 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a648964-a42b-4f00-9c80-f3dbdf071df9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.027460 5021 generic.go:334] "Generic (PLEG): container finished" podID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerID="891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc" exitCode=0 Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.027491 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerDied","Data":"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc"} Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.027796 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bh66m" event={"ID":"5a648964-a42b-4f00-9c80-f3dbdf071df9","Type":"ContainerDied","Data":"cf651c91b7df5edc3e8652a07851a6b4c24ce5ef86bbb62149642b46a341f37a"} Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.027819 5021 scope.go:117] "RemoveContainer" containerID="891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.027533 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bh66m" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.067066 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bh66m"] Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.070070 5021 scope.go:117] "RemoveContainer" containerID="fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.072365 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bh66m"] Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.086216 5021 scope.go:117] "RemoveContainer" containerID="34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.108358 5021 scope.go:117] "RemoveContainer" containerID="891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc" Jan 21 16:24:02 crc kubenswrapper[5021]: E0121 16:24:02.108827 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc\": container with ID starting with 891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc not found: ID does not exist" containerID="891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.108866 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc"} err="failed to get container status \"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc\": rpc error: code = NotFound desc = could not find container \"891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc\": container with ID starting with 891b005e01e1d41009b2c679e0175eb5e8cd3e778e611bb2f144610560dc97dc not found: ID does not exist" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.108896 5021 scope.go:117] "RemoveContainer" containerID="fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803" Jan 21 16:24:02 crc kubenswrapper[5021]: E0121 16:24:02.109397 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803\": container with ID starting with fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803 not found: ID does not exist" containerID="fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.109461 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803"} err="failed to get container status \"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803\": rpc error: code = NotFound desc = could not find container \"fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803\": container with ID starting with fcb4651178d88bbe57e1d48e67862f5c985c9006f6533d9f925652f7a507e803 not found: ID does not exist" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.109497 5021 scope.go:117] "RemoveContainer" containerID="34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142" Jan 21 16:24:02 crc kubenswrapper[5021]: E0121 16:24:02.109829 5021 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142\": container with ID starting with 34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142 not found: ID does not exist" containerID="34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.109859 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142"} err="failed to get container status \"34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142\": rpc error: code = NotFound desc = could not find container \"34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142\": container with ID starting with 34542756ac0a238eaeac1cbdf2a75fa499fa0bb6fafd2cbc186fdb2799fda142 not found: ID does not exist" Jan 21 16:24:02 crc kubenswrapper[5021]: I0121 16:24:02.745861 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" path="/var/lib/kubelet/pods/5a648964-a42b-4f00-9c80-f3dbdf071df9/volumes" Jan 21 16:25:12 crc kubenswrapper[5021]: I0121 16:25:12.357380 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:25:12 crc kubenswrapper[5021]: I0121 16:25:12.358095 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:25:42 crc kubenswrapper[5021]: I0121 16:25:42.357550 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:25:42 crc kubenswrapper[5021]: I0121 16:25:42.359113 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.356881 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.358058 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.358124 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.358726 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.358784 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" gracePeriod=600 Jan 21 16:26:12 crc kubenswrapper[5021]: E0121 16:26:12.476294 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.941343 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" exitCode=0 Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.941405 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"} Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.941507 5021 scope.go:117] "RemoveContainer" containerID="42655b6e9060bc485d886ddd971a3d61170627eb976c2b26db3bc35bf98546ed" Jan 21 16:26:12 crc kubenswrapper[5021]: I0121 16:26:12.942011 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:26:12 crc kubenswrapper[5021]: E0121 16:26:12.942326 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:26:25 crc kubenswrapper[5021]: I0121 16:26:25.737626 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:26:25 crc kubenswrapper[5021]: E0121 16:26:25.739304 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:26:36 crc 
kubenswrapper[5021]: I0121 16:26:36.738272 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:26:36 crc kubenswrapper[5021]: E0121 16:26:36.739075 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:26:51 crc kubenswrapper[5021]: I0121 16:26:51.738263 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:26:51 crc kubenswrapper[5021]: E0121 16:26:51.739491 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:27:04 crc kubenswrapper[5021]: I0121 16:27:04.737807 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:27:04 crc kubenswrapper[5021]: E0121 16:27:04.739142 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:27:19 crc kubenswrapper[5021]: I0121 16:27:19.738613 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:27:19 crc kubenswrapper[5021]: E0121 16:27:19.739443 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:27:32 crc kubenswrapper[5021]: I0121 16:27:32.738764 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:27:32 crc kubenswrapper[5021]: E0121 16:27:32.739676 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:27:44 crc kubenswrapper[5021]: I0121 16:27:44.738021 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:27:44 crc 
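[Annotation] The long run of "CrashLoopBackOff: back-off 5m0s" entries above and below reflects the kubelet's restart backoff: the delay between restart attempts doubles per consecutive failure up to a cap, after which retries continue at the capped interval until one succeeds (as one eventually does at 16:31:20). A sketch of capped doubling, assuming the commonly cited kubelet defaults of a 10s initial delay and a 5m cap; treat those constants as assumptions read off the "back-off 5m0s" messages:

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay per consecutive failure, capped at max.
func nextBackoff(cur, max time.Duration) time.Duration {
	if cur == 0 {
		return 10 * time.Second // assumed initial delay
	}
	if cur*2 > max {
		return max
	}
	return cur * 2
}

func main() {
	d := time.Duration(0)
	for i := 0; i < 8; i++ {
		d = nextBackoff(d, 5*time.Minute)
		fmt.Printf("restart %d delayed by %v\n", i+1, d) // 10s, 20s, 40s, ... 5m0s
	}
}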
Jan 21 16:27:44 crc kubenswrapper[5021]: I0121 16:27:44.738021 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:27:44 crc kubenswrapper[5021]: E0121 16:27:44.740195 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:27:57 crc kubenswrapper[5021]: I0121 16:27:57.738182 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:27:57 crc kubenswrapper[5021]: E0121 16:27:57.739059 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:28:10 crc kubenswrapper[5021]: I0121 16:28:10.737951 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:28:10 crc kubenswrapper[5021]: E0121 16:28:10.738937 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:28:25 crc kubenswrapper[5021]: I0121 16:28:25.737363 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:28:25 crc kubenswrapper[5021]: E0121 16:28:25.738252 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:28:37 crc kubenswrapper[5021]: I0121 16:28:37.737881 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:28:37 crc kubenswrapper[5021]: E0121 16:28:37.738663 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:28:52 crc kubenswrapper[5021]: I0121 16:28:52.738521 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:28:52 crc kubenswrapper[5021]: E0121 16:28:52.739411 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:29:07 crc kubenswrapper[5021]: I0121 16:29:07.737869 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:29:07 crc kubenswrapper[5021]: E0121 16:29:07.739280 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:29:22 crc kubenswrapper[5021]: I0121 16:29:22.737958 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:29:22 crc kubenswrapper[5021]: E0121 16:29:22.738715 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:29:36 crc kubenswrapper[5021]: I0121 16:29:36.738014 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:29:36 crc kubenswrapper[5021]: E0121 16:29:36.738886 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:29:47 crc kubenswrapper[5021]: I0121 16:29:47.738532 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
Jan 21 16:29:47 crc kubenswrapper[5021]: E0121 16:29:47.739494 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:29:58 crc kubenswrapper[5021]: I0121 16:29:58.742006 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8"
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.193650 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w"] Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194320 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="extract-content" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194335 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="extract-content" Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194351 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194358 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194370 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="extract-utilities" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194377 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="extract-utilities" Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194386 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194392 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194409 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="extract-utilities" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194414 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="extract-utilities" Jan 21 16:30:00 crc kubenswrapper[5021]: E0121 16:30:00.194424 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="extract-content" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194429 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="extract-content" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194561 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb81582b-4eb1-498c-a458-41c939b1eacd" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.194582 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a648964-a42b-4f00-9c80-f3dbdf071df9" containerName="registry-server" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.195265 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.197953 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.198022 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.210161 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w"] Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.220365 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.220473 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.221006 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kplr\" (UniqueName: \"kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.322234 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.322313 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.322366 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kplr\" (UniqueName: \"kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.323353 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume\") pod 
\"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.330123 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.341619 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kplr\" (UniqueName: \"kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr\") pod \"collect-profiles-29483550-wds7w\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:00 crc kubenswrapper[5021]: I0121 16:30:00.520741 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:01 crc kubenswrapper[5021]: I0121 16:30:01.001615 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w"] Jan 21 16:30:01 crc kubenswrapper[5021]: I0121 16:30:01.531575 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" event={"ID":"441ab65e-1d3a-4d20-b550-cec74d08aa26","Type":"ContainerStarted","Data":"883d703fd987ea5360e52e82268cb87d751cd852d3555ed620dfab8e833b7793"} Jan 21 16:30:02 crc kubenswrapper[5021]: I0121 16:30:02.541066 5021 generic.go:334] "Generic (PLEG): container finished" podID="441ab65e-1d3a-4d20-b550-cec74d08aa26" containerID="f2c54e175fb6b10bca997b2d187cff1bba5c1c62c7725e434088fbea7e1609ff" exitCode=0 Jan 21 16:30:02 crc kubenswrapper[5021]: I0121 16:30:02.541147 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" event={"ID":"441ab65e-1d3a-4d20-b550-cec74d08aa26","Type":"ContainerDied","Data":"f2c54e175fb6b10bca997b2d187cff1bba5c1c62c7725e434088fbea7e1609ff"} Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.839535 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.882326 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume\") pod \"441ab65e-1d3a-4d20-b550-cec74d08aa26\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.883457 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kplr\" (UniqueName: \"kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr\") pod \"441ab65e-1d3a-4d20-b550-cec74d08aa26\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.883559 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume\") pod \"441ab65e-1d3a-4d20-b550-cec74d08aa26\" (UID: \"441ab65e-1d3a-4d20-b550-cec74d08aa26\") " Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.884488 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume" (OuterVolumeSpecName: "config-volume") pod "441ab65e-1d3a-4d20-b550-cec74d08aa26" (UID: "441ab65e-1d3a-4d20-b550-cec74d08aa26"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.887810 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "441ab65e-1d3a-4d20-b550-cec74d08aa26" (UID: "441ab65e-1d3a-4d20-b550-cec74d08aa26"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.888149 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr" (OuterVolumeSpecName: "kube-api-access-8kplr") pod "441ab65e-1d3a-4d20-b550-cec74d08aa26" (UID: "441ab65e-1d3a-4d20-b550-cec74d08aa26"). InnerVolumeSpecName "kube-api-access-8kplr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.985899 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/441ab65e-1d3a-4d20-b550-cec74d08aa26-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.986008 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/441ab65e-1d3a-4d20-b550-cec74d08aa26-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:30:03 crc kubenswrapper[5021]: I0121 16:30:03.986024 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kplr\" (UniqueName: \"kubernetes.io/projected/441ab65e-1d3a-4d20-b550-cec74d08aa26-kube-api-access-8kplr\") on node \"crc\" DevicePath \"\"" Jan 21 16:30:04 crc kubenswrapper[5021]: I0121 16:30:04.558449 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" event={"ID":"441ab65e-1d3a-4d20-b550-cec74d08aa26","Type":"ContainerDied","Data":"883d703fd987ea5360e52e82268cb87d751cd852d3555ed620dfab8e833b7793"} Jan 21 16:30:04 crc kubenswrapper[5021]: I0121 16:30:04.558493 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="883d703fd987ea5360e52e82268cb87d751cd852d3555ed620dfab8e833b7793" Jan 21 16:30:04 crc kubenswrapper[5021]: I0121 16:30:04.558927 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483550-wds7w" Jan 21 16:30:04 crc kubenswrapper[5021]: I0121 16:30:04.925433 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz"] Jan 21 16:30:04 crc kubenswrapper[5021]: I0121 16:30:04.932529 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483505-n6gnz"] Jan 21 16:30:06 crc kubenswrapper[5021]: I0121 16:30:06.747563 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b3bf6b3-6efd-4f62-abec-5c886c910ed4" path="/var/lib/kubelet/pods/5b3bf6b3-6efd-4f62-abec-5c886c910ed4/volumes" Jan 21 16:30:12 crc kubenswrapper[5021]: I0121 16:30:12.737641 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:30:12 crc kubenswrapper[5021]: E0121 16:30:12.738453 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:30:26 crc kubenswrapper[5021]: I0121 16:30:26.737881 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:30:26 crc kubenswrapper[5021]: E0121 16:30:26.738783 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:30:39 crc kubenswrapper[5021]: I0121 16:30:39.739054 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:30:39 crc kubenswrapper[5021]: E0121 16:30:39.740497 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.033549 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:30:46 crc kubenswrapper[5021]: E0121 16:30:46.035743 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="441ab65e-1d3a-4d20-b550-cec74d08aa26" containerName="collect-profiles" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.035861 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="441ab65e-1d3a-4d20-b550-cec74d08aa26" containerName="collect-profiles" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.036152 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="441ab65e-1d3a-4d20-b550-cec74d08aa26" containerName="collect-profiles" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.037500 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.043436 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.130250 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpww5\" (UniqueName: \"kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.130684 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.130740 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.231623 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 
crc kubenswrapper[5021]: I0121 16:30:46.231753 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpww5\" (UniqueName: \"kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.231811 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.232269 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.232324 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.258010 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpww5\" (UniqueName: \"kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5\") pod \"redhat-operators-p8fzj\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.356486 5021 util.go:30] "No sandbox for pod can be found. 
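[Annotation] Both sandbox messages in this log come from one decision point: "No sandbox for pod can be found" (util.go:30, a brand-new pod) and "No ready sandbox for pod can be found" (util.go:48, after the sandbox died or the pod is being torn down). The kubelet only reuses a sandbox that exists and is still ready; otherwise it creates a fresh one. A compact sketch of that check, with placeholder types standing in for the CRI PodSandbox status:

package main

import "fmt"

type sandbox struct {
	id    string
	ready bool
}

// needNewSandbox reports whether a fresh sandbox must be created: either
// the pod has none at all, or none of the existing ones is still ready.
func needNewSandbox(sandboxes []sandbox) (bool, string) {
	if len(sandboxes) == 0 {
		return true, "No sandbox for pod can be found. Need to start a new one"
	}
	for _, s := range sandboxes {
		if s.ready {
			return false, "reusing sandbox " + s.id
		}
	}
	return true, "No ready sandbox for pod can be found. Need to start a new one"
}

func main() {
	ok, msg := needNewSandbox(nil)
	fmt.Println(ok, msg)
}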
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:46 crc kubenswrapper[5021]: I0121 16:30:46.875153 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:30:47 crc kubenswrapper[5021]: I0121 16:30:47.882293 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerStarted","Data":"50df356d324ede56995546527a889167debd42d7548ca0a649b9e23f8399f8f6"} Jan 21 16:30:47 crc kubenswrapper[5021]: I0121 16:30:47.882688 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerStarted","Data":"449f33c650d1b89d0d1526cc9f6032d71673f3f072d47242c795da550430ffbb"} Jan 21 16:30:48 crc kubenswrapper[5021]: I0121 16:30:48.890045 5021 generic.go:334] "Generic (PLEG): container finished" podID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerID="50df356d324ede56995546527a889167debd42d7548ca0a649b9e23f8399f8f6" exitCode=0 Jan 21 16:30:48 crc kubenswrapper[5021]: I0121 16:30:48.890140 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerDied","Data":"50df356d324ede56995546527a889167debd42d7548ca0a649b9e23f8399f8f6"} Jan 21 16:30:48 crc kubenswrapper[5021]: I0121 16:30:48.893033 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 21 16:30:49 crc kubenswrapper[5021]: I0121 16:30:49.360028 5021 scope.go:117] "RemoveContainer" containerID="3e713eb40693d4274935a35169f18d43aa9df5f5c88681c4fe83e314963489e5" Jan 21 16:30:51 crc kubenswrapper[5021]: I0121 16:30:51.738137 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:30:51 crc kubenswrapper[5021]: E0121 16:30:51.738982 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:30:51 crc kubenswrapper[5021]: I0121 16:30:51.915751 5021 generic.go:334] "Generic (PLEG): container finished" podID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerID="e397267cd1f813c04983c3822a218ecbd72a4b36943683ed2a59d6ef42888658" exitCode=0 Jan 21 16:30:51 crc kubenswrapper[5021]: I0121 16:30:51.915806 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerDied","Data":"e397267cd1f813c04983c3822a218ecbd72a4b36943683ed2a59d6ef42888658"} Jan 21 16:30:52 crc kubenswrapper[5021]: I0121 16:30:52.926446 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerStarted","Data":"d3f67c4003379b56f26df5a30322aaa6e6f5f24a2f68d671ab319061a4040a69"} Jan 21 16:30:52 crc kubenswrapper[5021]: I0121 16:30:52.955143 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-p8fzj" podStartSLOduration=3.172814545 podStartE2EDuration="6.955116926s" podCreationTimestamp="2026-01-21 16:30:46 +0000 UTC" firstStartedPulling="2026-01-21 16:30:48.892651737 +0000 UTC m=+3990.427765626" lastFinishedPulling="2026-01-21 16:30:52.674954118 +0000 UTC m=+3994.210068007" observedRunningTime="2026-01-21 16:30:52.949437551 +0000 UTC m=+3994.484551460" watchObservedRunningTime="2026-01-21 16:30:52.955116926 +0000 UTC m=+3994.490230815" Jan 21 16:30:56 crc kubenswrapper[5021]: I0121 16:30:56.357583 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:56 crc kubenswrapper[5021]: I0121 16:30:56.358048 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:30:57 crc kubenswrapper[5021]: I0121 16:30:57.397784 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p8fzj" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="registry-server" probeResult="failure" output=< Jan 21 16:30:57 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 16:30:57 crc kubenswrapper[5021]: > Jan 21 16:31:05 crc kubenswrapper[5021]: I0121 16:31:05.738827 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:31:05 crc kubenswrapper[5021]: E0121 16:31:05.739700 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:31:06 crc kubenswrapper[5021]: I0121 16:31:06.653513 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:31:06 crc kubenswrapper[5021]: I0121 16:31:06.759760 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:31:06 crc kubenswrapper[5021]: I0121 16:31:06.903483 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:31:08 crc kubenswrapper[5021]: I0121 16:31:08.024170 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p8fzj" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="registry-server" containerID="cri-o://d3f67c4003379b56f26df5a30322aaa6e6f5f24a2f68d671ab319061a4040a69" gracePeriod=2 Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.041039 5021 generic.go:334] "Generic (PLEG): container finished" podID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerID="d3f67c4003379b56f26df5a30322aaa6e6f5f24a2f68d671ab319061a4040a69" exitCode=0 Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.041103 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerDied","Data":"d3f67c4003379b56f26df5a30322aaa6e6f5f24a2f68d671ab319061a4040a69"} Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.634217 5021 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.703513 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content\") pod \"8e91fcd5-29f5-4765-8141-8d96a790347f\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.703639 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities\") pod \"8e91fcd5-29f5-4765-8141-8d96a790347f\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.705717 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities" (OuterVolumeSpecName: "utilities") pod "8e91fcd5-29f5-4765-8141-8d96a790347f" (UID: "8e91fcd5-29f5-4765-8141-8d96a790347f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.805129 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpww5\" (UniqueName: \"kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5\") pod \"8e91fcd5-29f5-4765-8141-8d96a790347f\" (UID: \"8e91fcd5-29f5-4765-8141-8d96a790347f\") " Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.805522 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.812931 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5" (OuterVolumeSpecName: "kube-api-access-cpww5") pod "8e91fcd5-29f5-4765-8141-8d96a790347f" (UID: "8e91fcd5-29f5-4765-8141-8d96a790347f"). InnerVolumeSpecName "kube-api-access-cpww5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.831942 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e91fcd5-29f5-4765-8141-8d96a790347f" (UID: "8e91fcd5-29f5-4765-8141-8d96a790347f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.908592 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpww5\" (UniqueName: \"kubernetes.io/projected/8e91fcd5-29f5-4765-8141-8d96a790347f-kube-api-access-cpww5\") on node \"crc\" DevicePath \"\"" Jan 21 16:31:10 crc kubenswrapper[5021]: I0121 16:31:10.908940 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e91fcd5-29f5-4765-8141-8d96a790347f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.052485 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8fzj" event={"ID":"8e91fcd5-29f5-4765-8141-8d96a790347f","Type":"ContainerDied","Data":"449f33c650d1b89d0d1526cc9f6032d71673f3f072d47242c795da550430ffbb"} Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.052537 5021 scope.go:117] "RemoveContainer" containerID="d3f67c4003379b56f26df5a30322aaa6e6f5f24a2f68d671ab319061a4040a69" Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.052599 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p8fzj" Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.073209 5021 scope.go:117] "RemoveContainer" containerID="e397267cd1f813c04983c3822a218ecbd72a4b36943683ed2a59d6ef42888658" Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.092437 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.099141 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p8fzj"] Jan 21 16:31:11 crc kubenswrapper[5021]: I0121 16:31:11.128099 5021 scope.go:117] "RemoveContainer" containerID="50df356d324ede56995546527a889167debd42d7548ca0a649b9e23f8399f8f6" Jan 21 16:31:12 crc kubenswrapper[5021]: I0121 16:31:12.748623 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" path="/var/lib/kubelet/pods/8e91fcd5-29f5-4765-8141-8d96a790347f/volumes" Jan 21 16:31:19 crc kubenswrapper[5021]: I0121 16:31:19.738037 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:31:20 crc kubenswrapper[5021]: I0121 16:31:20.117147 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb"} Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.561775 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"] Jan 21 16:32:35 crc kubenswrapper[5021]: E0121 16:32:35.562705 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="extract-utilities" Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.562721 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="extract-utilities" Jan 21 16:32:35 crc kubenswrapper[5021]: E0121 16:32:35.562747 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="registry-server" Jan 21 
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.562754 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="registry-server"
Jan 21 16:32:35 crc kubenswrapper[5021]: E0121 16:32:35.562775 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="extract-content"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.562783 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="extract-content"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.563030 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e91fcd5-29f5-4765-8141-8d96a790347f" containerName="registry-server"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.568515 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.572594 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"]
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.642514 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwx6t\" (UniqueName: \"kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.642573 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.642642 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.744141 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwx6t\" (UniqueName: \"kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.744194 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.744859 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.745007 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.745333 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.763738 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwx6t\" (UniqueName: \"kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t\") pod \"redhat-marketplace-zcvqv\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:35 crc kubenswrapper[5021]: I0121 16:32:35.887600 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zcvqv"
Jan 21 16:32:36 crc kubenswrapper[5021]: I0121 16:32:36.346823 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"]
Jan 21 16:32:36 crc kubenswrapper[5021]: I0121 16:32:36.637620 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerStarted","Data":"a9ff7f292681fd89aee397b29154fa822c91dbfe8bfd711e9ec010ea870efda9"}
Jan 21 16:32:37 crc kubenswrapper[5021]: I0121 16:32:37.647403 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerStarted","Data":"51f526280b18bc80bf9867372000906d3deba3b4f7fd054f97ee37d433dffce9"}
Jan 21 16:32:38 crc kubenswrapper[5021]: I0121 16:32:38.657295 5021 generic.go:334] "Generic (PLEG): container finished" podID="15f872a0-4310-43ec-9377-354bc46ab302" containerID="51f526280b18bc80bf9867372000906d3deba3b4f7fd054f97ee37d433dffce9" exitCode=0
Jan 21 16:32:38 crc kubenswrapper[5021]: I0121 16:32:38.657353 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerDied","Data":"51f526280b18bc80bf9867372000906d3deba3b4f7fd054f97ee37d433dffce9"}
Jan 21 16:32:41 crc kubenswrapper[5021]: I0121 16:32:41.679943 5021 generic.go:334] "Generic (PLEG): container finished" podID="15f872a0-4310-43ec-9377-354bc46ab302" containerID="eed1df70560b98d59decf712782c5392ed6cf4cd07945f0b5b61f2696197be4f" exitCode=0
Jan 21 16:32:41 crc kubenswrapper[5021]: I0121 16:32:41.680042 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerDied","Data":"eed1df70560b98d59decf712782c5392ed6cf4cd07945f0b5b61f2696197be4f"}
pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerStarted","Data":"56b34487b26765b7fd94678b07de5a2bada1de72e0a436e7611efb37e7b25ee8"} Jan 21 16:32:42 crc kubenswrapper[5021]: I0121 16:32:42.714415 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zcvqv" podStartSLOduration=5.00154249 podStartE2EDuration="7.714392254s" podCreationTimestamp="2026-01-21 16:32:35 +0000 UTC" firstStartedPulling="2026-01-21 16:32:39.66554527 +0000 UTC m=+4101.200659159" lastFinishedPulling="2026-01-21 16:32:42.378395034 +0000 UTC m=+4103.913508923" observedRunningTime="2026-01-21 16:32:42.712879333 +0000 UTC m=+4104.247993232" watchObservedRunningTime="2026-01-21 16:32:42.714392254 +0000 UTC m=+4104.249506153" Jan 21 16:32:45 crc kubenswrapper[5021]: I0121 16:32:45.887948 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:45 crc kubenswrapper[5021]: I0121 16:32:45.888587 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:45 crc kubenswrapper[5021]: I0121 16:32:45.931936 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:55 crc kubenswrapper[5021]: I0121 16:32:55.929669 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:55 crc kubenswrapper[5021]: I0121 16:32:55.976302 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"] Jan 21 16:32:56 crc kubenswrapper[5021]: I0121 16:32:56.777355 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zcvqv" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="registry-server" containerID="cri-o://56b34487b26765b7fd94678b07de5a2bada1de72e0a436e7611efb37e7b25ee8" gracePeriod=2 Jan 21 16:32:57 crc kubenswrapper[5021]: I0121 16:32:57.785376 5021 generic.go:334] "Generic (PLEG): container finished" podID="15f872a0-4310-43ec-9377-354bc46ab302" containerID="56b34487b26765b7fd94678b07de5a2bada1de72e0a436e7611efb37e7b25ee8" exitCode=0 Jan 21 16:32:57 crc kubenswrapper[5021]: I0121 16:32:57.785419 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerDied","Data":"56b34487b26765b7fd94678b07de5a2bada1de72e0a436e7611efb37e7b25ee8"} Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.017263 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.114989 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content\") pod \"15f872a0-4310-43ec-9377-354bc46ab302\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.115080 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwx6t\" (UniqueName: \"kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t\") pod \"15f872a0-4310-43ec-9377-354bc46ab302\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.115208 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities\") pod \"15f872a0-4310-43ec-9377-354bc46ab302\" (UID: \"15f872a0-4310-43ec-9377-354bc46ab302\") " Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.116422 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities" (OuterVolumeSpecName: "utilities") pod "15f872a0-4310-43ec-9377-354bc46ab302" (UID: "15f872a0-4310-43ec-9377-354bc46ab302"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.120823 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t" (OuterVolumeSpecName: "kube-api-access-mwx6t") pod "15f872a0-4310-43ec-9377-354bc46ab302" (UID: "15f872a0-4310-43ec-9377-354bc46ab302"). InnerVolumeSpecName "kube-api-access-mwx6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.138143 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15f872a0-4310-43ec-9377-354bc46ab302" (UID: "15f872a0-4310-43ec-9377-354bc46ab302"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.217616 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.217929 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15f872a0-4310-43ec-9377-354bc46ab302-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.218012 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwx6t\" (UniqueName: \"kubernetes.io/projected/15f872a0-4310-43ec-9377-354bc46ab302-kube-api-access-mwx6t\") on node \"crc\" DevicePath \"\"" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.802576 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zcvqv" event={"ID":"15f872a0-4310-43ec-9377-354bc46ab302","Type":"ContainerDied","Data":"a9ff7f292681fd89aee397b29154fa822c91dbfe8bfd711e9ec010ea870efda9"} Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.802639 5021 scope.go:117] "RemoveContainer" containerID="56b34487b26765b7fd94678b07de5a2bada1de72e0a436e7611efb37e7b25ee8" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.802661 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zcvqv" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.821282 5021 scope.go:117] "RemoveContainer" containerID="eed1df70560b98d59decf712782c5392ed6cf4cd07945f0b5b61f2696197be4f" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.841735 5021 scope.go:117] "RemoveContainer" containerID="51f526280b18bc80bf9867372000906d3deba3b4f7fd054f97ee37d433dffce9" Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.844703 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"] Jan 21 16:32:59 crc kubenswrapper[5021]: I0121 16:32:59.858322 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zcvqv"] Jan 21 16:33:00 crc kubenswrapper[5021]: I0121 16:33:00.745512 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15f872a0-4310-43ec-9377-354bc46ab302" path="/var/lib/kubelet/pods/15f872a0-4310-43ec-9377-354bc46ab302/volumes" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.335711 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:40 crc kubenswrapper[5021]: E0121 16:33:40.336642 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="registry-server" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.336659 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="registry-server" Jan 21 16:33:40 crc kubenswrapper[5021]: E0121 16:33:40.336675 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="extract-utilities" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.336681 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="extract-utilities" Jan 21 16:33:40 crc kubenswrapper[5021]: E0121 16:33:40.336704 5021 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="extract-content" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.336710 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="extract-content" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.336879 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="15f872a0-4310-43ec-9377-354bc46ab302" containerName="registry-server" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.338452 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.346706 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.432580 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.432653 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2b7d\" (UniqueName: \"kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.432680 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.534560 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.534639 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2b7d\" (UniqueName: \"kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.534674 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.535205 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.535202 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.568337 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2b7d\" (UniqueName: \"kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d\") pod \"community-operators-qfsqt\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.657640 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:40 crc kubenswrapper[5021]: I0121 16:33:40.949831 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:41 crc kubenswrapper[5021]: I0121 16:33:41.075969 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerStarted","Data":"605050568a2dbfc1813ca1d014cfa4131eab8a3b87d5bfd98a1e8d9a7d0d3567"} Jan 21 16:33:42 crc kubenswrapper[5021]: I0121 16:33:42.083256 5021 generic.go:334] "Generic (PLEG): container finished" podID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerID="9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618" exitCode=0 Jan 21 16:33:42 crc kubenswrapper[5021]: I0121 16:33:42.083304 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerDied","Data":"9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618"} Jan 21 16:33:42 crc kubenswrapper[5021]: I0121 16:33:42.357174 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:33:42 crc kubenswrapper[5021]: I0121 16:33:42.357497 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:33:45 crc kubenswrapper[5021]: I0121 16:33:45.105292 5021 generic.go:334] "Generic (PLEG): container finished" podID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerID="0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639" exitCode=0 Jan 21 16:33:45 crc kubenswrapper[5021]: I0121 16:33:45.105368 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" 
event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerDied","Data":"0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639"} Jan 21 16:33:46 crc kubenswrapper[5021]: I0121 16:33:46.113882 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerStarted","Data":"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858"} Jan 21 16:33:46 crc kubenswrapper[5021]: I0121 16:33:46.134817 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qfsqt" podStartSLOduration=2.674656391 podStartE2EDuration="6.134796719s" podCreationTimestamp="2026-01-21 16:33:40 +0000 UTC" firstStartedPulling="2026-01-21 16:33:42.08480567 +0000 UTC m=+4163.619919559" lastFinishedPulling="2026-01-21 16:33:45.544945998 +0000 UTC m=+4167.080059887" observedRunningTime="2026-01-21 16:33:46.131965372 +0000 UTC m=+4167.667079261" watchObservedRunningTime="2026-01-21 16:33:46.134796719 +0000 UTC m=+4167.669910608" Jan 21 16:33:50 crc kubenswrapper[5021]: I0121 16:33:50.657793 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:50 crc kubenswrapper[5021]: I0121 16:33:50.658354 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:50 crc kubenswrapper[5021]: I0121 16:33:50.703841 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:51 crc kubenswrapper[5021]: I0121 16:33:51.181783 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:51 crc kubenswrapper[5021]: I0121 16:33:51.238597 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:53 crc kubenswrapper[5021]: I0121 16:33:53.156058 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qfsqt" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="registry-server" containerID="cri-o://5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858" gracePeriod=2 Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.044598 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.130728 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content\") pod \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.130797 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2b7d\" (UniqueName: \"kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d\") pod \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.130975 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities\") pod \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\" (UID: \"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0\") " Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.132382 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities" (OuterVolumeSpecName: "utilities") pod "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" (UID: "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.150164 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d" (OuterVolumeSpecName: "kube-api-access-t2b7d") pod "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" (UID: "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0"). InnerVolumeSpecName "kube-api-access-t2b7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.164842 5021 generic.go:334] "Generic (PLEG): container finished" podID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerID="5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858" exitCode=0 Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.164935 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerDied","Data":"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858"} Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.164968 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfsqt" event={"ID":"57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0","Type":"ContainerDied","Data":"605050568a2dbfc1813ca1d014cfa4131eab8a3b87d5bfd98a1e8d9a7d0d3567"} Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.164980 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qfsqt" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.164985 5021 scope.go:117] "RemoveContainer" containerID="5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.197284 5021 scope.go:117] "RemoveContainer" containerID="0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.199142 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" (UID: "57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.219985 5021 scope.go:117] "RemoveContainer" containerID="9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.232630 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.232669 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.232683 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2b7d\" (UniqueName: \"kubernetes.io/projected/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0-kube-api-access-t2b7d\") on node \"crc\" DevicePath \"\"" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.237850 5021 scope.go:117] "RemoveContainer" containerID="5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858" Jan 21 16:33:54 crc kubenswrapper[5021]: E0121 16:33:54.238424 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858\": container with ID starting with 5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858 not found: ID does not exist" containerID="5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.238474 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858"} err="failed to get container status \"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858\": rpc error: code = NotFound desc = could not find container \"5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858\": container with ID starting with 5089b1512ac5677d44597e470026940fc312b1f86618b186f8b5a4421434c858 not found: ID does not exist" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.238506 5021 scope.go:117] "RemoveContainer" containerID="0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639" Jan 21 16:33:54 crc kubenswrapper[5021]: E0121 16:33:54.238968 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639\": 
container with ID starting with 0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639 not found: ID does not exist" containerID="0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.239098 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639"} err="failed to get container status \"0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639\": rpc error: code = NotFound desc = could not find container \"0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639\": container with ID starting with 0664c35973f8397e2a99e5a9cac2a6aea00f4a0fb39163cc1c776131f038b639 not found: ID does not exist" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.239195 5021 scope.go:117] "RemoveContainer" containerID="9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618" Jan 21 16:33:54 crc kubenswrapper[5021]: E0121 16:33:54.239581 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618\": container with ID starting with 9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618 not found: ID does not exist" containerID="9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.239643 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618"} err="failed to get container status \"9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618\": rpc error: code = NotFound desc = could not find container \"9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618\": container with ID starting with 9642b494c067a57bad5bb16c11727ecf6f50e7d4b51e8871ca00479f0409e618 not found: ID does not exist" Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.491973 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.498894 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qfsqt"] Jan 21 16:33:54 crc kubenswrapper[5021]: I0121 16:33:54.745839 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" path="/var/lib/kubelet/pods/57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0/volumes" Jan 21 16:34:12 crc kubenswrapper[5021]: I0121 16:34:12.357025 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:34:12 crc kubenswrapper[5021]: I0121 16:34:12.357566 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:34:42 crc kubenswrapper[5021]: I0121 16:34:42.356930 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:34:42 crc kubenswrapper[5021]: I0121 16:34:42.357541 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:34:42 crc kubenswrapper[5021]: I0121 16:34:42.357594 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:34:42 crc kubenswrapper[5021]: I0121 16:34:42.358252 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:34:42 crc kubenswrapper[5021]: I0121 16:34:42.358302 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb" gracePeriod=600 Jan 21 16:34:43 crc kubenswrapper[5021]: I0121 16:34:43.478194 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb" exitCode=0 Jan 21 16:34:43 crc kubenswrapper[5021]: I0121 16:34:43.478276 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb"} Jan 21 16:34:43 crc kubenswrapper[5021]: I0121 16:34:43.478503 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec"} Jan 21 16:34:43 crc kubenswrapper[5021]: I0121 16:34:43.478525 5021 scope.go:117] "RemoveContainer" containerID="2b478fc442abbb775fd61836bc7953eb653808ff3f07906e91a7cce619b817f8" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.559837 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:44 crc kubenswrapper[5021]: E0121 16:34:44.560666 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="extract-utilities" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.560685 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="extract-utilities" Jan 21 16:34:44 crc kubenswrapper[5021]: E0121 16:34:44.560694 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="extract-content" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 
16:34:44.560701 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="extract-content" Jan 21 16:34:44 crc kubenswrapper[5021]: E0121 16:34:44.560726 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="registry-server" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.560733 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="registry-server" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.560927 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="57cd0ff9-72b1-445e-8bcd-b77e3ecee8d0" containerName="registry-server" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.562234 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.578227 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.640854 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wn6t\" (UniqueName: \"kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.641100 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.641170 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.742066 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.742117 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.742148 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wn6t\" (UniqueName: \"kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 
21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.742635 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.742821 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.762894 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wn6t\" (UniqueName: \"kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t\") pod \"certified-operators-9rcpg\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:44 crc kubenswrapper[5021]: I0121 16:34:44.883901 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:45 crc kubenswrapper[5021]: I0121 16:34:45.360505 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:45 crc kubenswrapper[5021]: I0121 16:34:45.495221 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerStarted","Data":"07714a68f1a1ab44f97570f69bf0ca8303e62935ee186821323ee6e5c804455e"} Jan 21 16:34:45 crc kubenswrapper[5021]: I0121 16:34:45.495523 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerStarted","Data":"23413137b6f5301524727ce03a0817f9554dbf16755d999b09d04ff14e191339"} Jan 21 16:34:46 crc kubenswrapper[5021]: I0121 16:34:46.502843 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerID="07714a68f1a1ab44f97570f69bf0ca8303e62935ee186821323ee6e5c804455e" exitCode=0 Jan 21 16:34:46 crc kubenswrapper[5021]: I0121 16:34:46.502895 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerDied","Data":"07714a68f1a1ab44f97570f69bf0ca8303e62935ee186821323ee6e5c804455e"} Jan 21 16:34:47 crc kubenswrapper[5021]: I0121 16:34:47.511671 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerStarted","Data":"5880809d0dad11883bfabe015aea354e4209625c364ead985412145cab25738d"} Jan 21 16:34:48 crc kubenswrapper[5021]: I0121 16:34:48.518776 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerID="5880809d0dad11883bfabe015aea354e4209625c364ead985412145cab25738d" exitCode=0 Jan 21 16:34:48 crc kubenswrapper[5021]: I0121 16:34:48.518845 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" 
event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerDied","Data":"5880809d0dad11883bfabe015aea354e4209625c364ead985412145cab25738d"} Jan 21 16:34:49 crc kubenswrapper[5021]: I0121 16:34:49.527964 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerStarted","Data":"7aedc2d3210c25503977e913626d2b0eb09dfdd17d9d8c6d1bfb415205b085a5"} Jan 21 16:34:49 crc kubenswrapper[5021]: I0121 16:34:49.550409 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9rcpg" podStartSLOduration=2.825970801 podStartE2EDuration="5.550391171s" podCreationTimestamp="2026-01-21 16:34:44 +0000 UTC" firstStartedPulling="2026-01-21 16:34:46.506243554 +0000 UTC m=+4228.041357443" lastFinishedPulling="2026-01-21 16:34:49.230663924 +0000 UTC m=+4230.765777813" observedRunningTime="2026-01-21 16:34:49.544033068 +0000 UTC m=+4231.079146957" watchObservedRunningTime="2026-01-21 16:34:49.550391171 +0000 UTC m=+4231.085505060" Jan 21 16:34:54 crc kubenswrapper[5021]: I0121 16:34:54.885031 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:54 crc kubenswrapper[5021]: I0121 16:34:54.886054 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:54 crc kubenswrapper[5021]: I0121 16:34:54.937107 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:55 crc kubenswrapper[5021]: I0121 16:34:55.600653 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:55 crc kubenswrapper[5021]: I0121 16:34:55.660236 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:57 crc kubenswrapper[5021]: I0121 16:34:57.574092 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9rcpg" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="registry-server" containerID="cri-o://7aedc2d3210c25503977e913626d2b0eb09dfdd17d9d8c6d1bfb415205b085a5" gracePeriod=2 Jan 21 16:34:58 crc kubenswrapper[5021]: I0121 16:34:58.582520 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerID="7aedc2d3210c25503977e913626d2b0eb09dfdd17d9d8c6d1bfb415205b085a5" exitCode=0 Jan 21 16:34:58 crc kubenswrapper[5021]: I0121 16:34:58.582570 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerDied","Data":"7aedc2d3210c25503977e913626d2b0eb09dfdd17d9d8c6d1bfb415205b085a5"} Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.104520 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.249782 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content\") pod \"a7e0540f-034e-495c-8f28-9680865cb6e3\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.249859 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities\") pod \"a7e0540f-034e-495c-8f28-9680865cb6e3\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.250010 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wn6t\" (UniqueName: \"kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t\") pod \"a7e0540f-034e-495c-8f28-9680865cb6e3\" (UID: \"a7e0540f-034e-495c-8f28-9680865cb6e3\") " Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.250989 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities" (OuterVolumeSpecName: "utilities") pod "a7e0540f-034e-495c-8f28-9680865cb6e3" (UID: "a7e0540f-034e-495c-8f28-9680865cb6e3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.257142 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t" (OuterVolumeSpecName: "kube-api-access-5wn6t") pod "a7e0540f-034e-495c-8f28-9680865cb6e3" (UID: "a7e0540f-034e-495c-8f28-9680865cb6e3"). InnerVolumeSpecName "kube-api-access-5wn6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.294129 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7e0540f-034e-495c-8f28-9680865cb6e3" (UID: "a7e0540f-034e-495c-8f28-9680865cb6e3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.351443 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wn6t\" (UniqueName: \"kubernetes.io/projected/a7e0540f-034e-495c-8f28-9680865cb6e3-kube-api-access-5wn6t\") on node \"crc\" DevicePath \"\"" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.351484 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.351498 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e0540f-034e-495c-8f28-9680865cb6e3-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.592006 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9rcpg" event={"ID":"a7e0540f-034e-495c-8f28-9680865cb6e3","Type":"ContainerDied","Data":"23413137b6f5301524727ce03a0817f9554dbf16755d999b09d04ff14e191339"} Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.592063 5021 scope.go:117] "RemoveContainer" containerID="7aedc2d3210c25503977e913626d2b0eb09dfdd17d9d8c6d1bfb415205b085a5" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.592070 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9rcpg" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.614934 5021 scope.go:117] "RemoveContainer" containerID="5880809d0dad11883bfabe015aea354e4209625c364ead985412145cab25738d" Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.633609 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.642320 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9rcpg"] Jan 21 16:34:59 crc kubenswrapper[5021]: I0121 16:34:59.650940 5021 scope.go:117] "RemoveContainer" containerID="07714a68f1a1ab44f97570f69bf0ca8303e62935ee186821323ee6e5c804455e" Jan 21 16:35:00 crc kubenswrapper[5021]: I0121 16:35:00.745944 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" path="/var/lib/kubelet/pods/a7e0540f-034e-495c-8f28-9680865cb6e3/volumes" Jan 21 16:36:42 crc kubenswrapper[5021]: I0121 16:36:42.357731 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:36:42 crc kubenswrapper[5021]: I0121 16:36:42.358328 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:37:12 crc kubenswrapper[5021]: I0121 16:37:12.356717 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:37:12 crc kubenswrapper[5021]: I0121 16:37:12.357405 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.356622 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.357212 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.357275 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.357874 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.357956 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" gracePeriod=600 Jan 21 16:37:42 crc kubenswrapper[5021]: E0121 16:37:42.478139 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.693972 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" exitCode=0 Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.694040 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec"} Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 16:37:42.694094 5021 scope.go:117] "RemoveContainer" containerID="acd17d6b49cc44519ee1a965e036044fd9efc4ca3c60a27b9cd1b8e7c06061eb" Jan 21 16:37:42 crc kubenswrapper[5021]: I0121 
16:37:42.694844 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:37:42 crc kubenswrapper[5021]: E0121 16:37:42.695319 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:37:54 crc kubenswrapper[5021]: I0121 16:37:54.738560 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:37:54 crc kubenswrapper[5021]: E0121 16:37:54.739311 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:38:08 crc kubenswrapper[5021]: I0121 16:38:08.741822 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:38:08 crc kubenswrapper[5021]: E0121 16:38:08.742670 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:38:23 crc kubenswrapper[5021]: I0121 16:38:23.738520 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:38:23 crc kubenswrapper[5021]: E0121 16:38:23.739555 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:38:34 crc kubenswrapper[5021]: I0121 16:38:34.738074 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:38:34 crc kubenswrapper[5021]: E0121 16:38:34.738768 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:38:45 crc kubenswrapper[5021]: I0121 16:38:45.737931 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:38:45 crc kubenswrapper[5021]: E0121 16:38:45.738694 
5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:38:58 crc kubenswrapper[5021]: I0121 16:38:58.745315 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:38:58 crc kubenswrapper[5021]: E0121 16:38:58.745979 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:39:10 crc kubenswrapper[5021]: I0121 16:39:10.737263 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:39:10 crc kubenswrapper[5021]: E0121 16:39:10.737825 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:39:25 crc kubenswrapper[5021]: I0121 16:39:25.737641 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:39:25 crc kubenswrapper[5021]: E0121 16:39:25.738405 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:39:38 crc kubenswrapper[5021]: I0121 16:39:38.741854 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:06 crc kubenswrapper[5021]: E0121 16:39:38.742799 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:39:50.738064 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:06 crc kubenswrapper[5021]: E0121 16:39:50.739042 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Jan 21 16:40:06 crc kubenswrapper[5021]: E0121 16:39:50.739042 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.611475 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 15.173500486s: [/var/lib/containers/storage/overlay/72a701d5977fcd187df116e8123a7bc6cc59d89b273fae510cfa4c23061ebdbf/diff /var/log/pods/openshift-kube-controller-manager-operator_kube-controller-manager-operator-78b949d7b-p5qnh_3b265d32-a4f3-4a09-931e-6f6ac0b82c1c/kube-controller-manager-operator/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.612862 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 15.017980543s: [/var/lib/containers/storage/overlay/fd55b7f030b7a7d396cf4cf7a4f1cbe74494d0cf4486cf500fd60f15ba709d01/diff /var/log/pods/hostpath-provisioner_csi-hostpathplugin-pkmbf_f0a0868f-a7c7-4bce-a9b5-855a11e2631e/hostpath-provisioner/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.614396 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.805703381s: [/var/lib/containers/storage/overlay/dbffcdbac9ae02af92c0b1f10d5f7213d70e9cce06acf93a243fa53057f3927a/diff ]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.617737 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.655130223s: [/var/lib/containers/storage/overlay/521956900b398722fbb81a402e0f28fb8d86dadfc4112fc0ff9dc4f5aa552b76/diff /var/log/pods/openshift-apiserver_apiserver-76f77b778f-g8wp8_e72ac95a-ad13-408c-b595-9e983c185119/openshift-apiserver-check-endpoints/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: E0121 16:40:06.619545 5021 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="15.883s"
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.619887 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.468189312s: [/var/lib/containers/storage/overlay/5f50e4143ef8a913073e9209696f0d6dedd108aa51941ebf3bb8acaec35cf212/diff /var/log/pods/cert-manager_cert-manager-86cb77c54b-sctx7_4e8ca7a2-523e-4c60-8032-020eed1d3acc/cert-manager-controller/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.620129 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.468409478s: [/var/lib/containers/storage/overlay/9b708d66948946b0d9115463724d89754d35bddf8a62a11f94593c2ddd112eac/diff /var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-chdrq_f1b5f0fa-4a66-443b-9178-8f51aee84d1f/cert-manager-webhook/0.log]; will not log again for this container unless duration exceeds 2s
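The fsHandler lines record recursive disk-usage and inode scans of overlay diff directories and pod log directories taking 13-15s each, which is what blew the 1s housekeeping budget out to 15.883s. A sketch of timing such a scan; filepath.WalkDir here is an illustrative stand-in, not the kubelet's actual implementation:

    package main

    import (
        "fmt"
        "io/fs"
        "os"
        "path/filepath"
        "time"
    )

    // Time a recursive disk-usage walk over a directory, mirroring what the
    // "fs: disk usage and inodes count ... took 15.173500486s" lines measure.
    func usage(dir string) (bytes, inodes int64, took time.Duration) {
        start := time.Now()
        filepath.WalkDir(dir, func(_ string, d fs.DirEntry, err error) error {
            if err != nil {
                return nil // skip unreadable entries, keep walking
            }
            inodes++
            if info, err := d.Info(); err == nil {
                bytes += info.Size()
            }
            return nil
        })
        return bytes, inodes, time.Since(start)
    }

    func main() {
        dir := os.Args[1] // e.g. an overlay .../diff dir; panics if no arg given
        b, n, took := usage(dir)
        fmt.Printf("%s: %d bytes, %d inodes, took %v\n", dir, b, n, took)
    }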
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.620766 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.469046436s: [/var/lib/containers/storage/overlay/0e94ebee05c2ddc3c565c518ab435e158ea8c38bab39c990f9f0bdae87197609/diff /var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-ds76b_6fc31342-f27a-4bc1-8121-a283abb689fa/cert-manager-cainjector/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.621164 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.34862672s: [/var/lib/containers/storage/overlay/2fb68403a16be225a45da2facf5648f8ffb4e9b5fd9b83083ce8a53ba4bcc4d4/diff /var/log/pods/openshift-machine-config-operator_machine-config-controller-84d6567774-ntg54_d5642370-ee34-4ee6-8ae8-0951768da987/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.622016 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.302673387s: [/var/lib/containers/storage/overlay/8231bee7d6cb7aec7f17ce2307ace447cc686ac0637402fdbd492d88d93f8481/diff /var/log/pods/openshift-ingress-operator_ingress-operator-5b745b69d9-64lkf_4d5503fc-0527-456d-b97d-7a455bdf3e7f/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.625194 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 14.122567173s: [/var/lib/containers/storage/overlay/ed6985e15a98e7f4034387f1d787c8b725b869577fb5d5a565f5b94ffd5b926b/diff /var/log/pods/openshift-dns_dns-default-vpddf_f384f01c-b331-4051-aef7-8da3fbbad2ab/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.632401 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.546790464s: [/var/lib/containers/storage/overlay/fe6010ec73c569fc64853747c400687cf42fafa8341251487d150b632028e0a9/diff /var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/controller/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.634849 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.402400894s: [/var/lib/containers/storage/overlay/a088a1a0456b815379469965834bca02f3c7bb05f289bcb55f9f175ae379aa1b/diff ]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.636123 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.352967946s: [/var/lib/containers/storage/overlay/3c396c72c6c64e3eab94db03711d9515f023553464a128e6ca47ca8397efb852/diff /var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.638487 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.240810155s: [/var/lib/containers/storage/overlay/a101f0c3c9316c21bb3ca56a9bbcc6ed20a0328847b518b75c9a873029f37960/diff /var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/reloader/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.642104 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.106869241s: [/var/lib/containers/storage/overlay/3ded0ce9892181a7e7fb226dcd183f74af77337a1f14dfd20887bb083dc8b17d/diff /var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr-metrics/0.log]; will not log again for this container unless duration exceeds 2s
Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.646163 5021 fsHandler.go:133] fs: disk usage and
inodes count on following dirs took 12.870530123s: [/var/lib/containers/storage/overlay/96528cb1b3254eff92da2dd1121db4c01db2f1d1adb64d3a242756f647f7bf34/diff /var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.651600 5021 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 13.314660831s: [/var/lib/containers/storage/overlay/bfb752ef7f5185575f9cb41dc9d4bbfd43628093437c6440309690d9b20fcb57/diff /var/log/pods/openshift-image-registry_image-registry-66df7c8f76-7mnxh_63f57f38-5cf6-4f89-b522-f3ae89bc7faa/registry/0.log]; will not log again for this container unless duration exceeds 2s Jan 21 16:40:06 crc kubenswrapper[5021]: I0121 16:40:06.658233 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:06 crc kubenswrapper[5021]: E0121 16:40:06.658451 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:40:17 crc kubenswrapper[5021]: I0121 16:40:17.738481 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:17 crc kubenswrapper[5021]: E0121 16:40:17.739350 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:40:28 crc kubenswrapper[5021]: I0121 16:40:28.745539 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:28 crc kubenswrapper[5021]: E0121 16:40:28.747276 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:40:39 crc kubenswrapper[5021]: I0121 16:40:39.739614 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:39 crc kubenswrapper[5021]: E0121 16:40:39.740730 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:40:54 crc kubenswrapper[5021]: I0121 16:40:54.738687 5021 scope.go:117] 
"RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:40:54 crc kubenswrapper[5021]: E0121 16:40:54.739571 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.034461 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-kxfkv/must-gather-spnwq"] Jan 21 16:41:03 crc kubenswrapper[5021]: E0121 16:41:03.036152 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="extract-utilities" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.036179 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="extract-utilities" Jan 21 16:41:03 crc kubenswrapper[5021]: E0121 16:41:03.036200 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="registry-server" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.036209 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="registry-server" Jan 21 16:41:03 crc kubenswrapper[5021]: E0121 16:41:03.036239 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="extract-content" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.036250 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="extract-content" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.036497 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e0540f-034e-495c-8f28-9680865cb6e3" containerName="registry-server" Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.037767 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.041511 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-kxfkv"/"default-dockercfg-bgssf"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.041949 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-kxfkv"/"openshift-service-ca.crt"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.041976 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-kxfkv"/"kube-root-ca.crt"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.051741 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-kxfkv/must-gather-spnwq"]
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.141522 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8bn8\" (UniqueName: \"kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.141584 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.243283 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8bn8\" (UniqueName: \"kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.243582 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.244214 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.261052 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8bn8\" (UniqueName: \"kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8\") pod \"must-gather-spnwq\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " pod="openshift-must-gather-kxfkv/must-gather-spnwq"
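The reconciler lines above show the fixed per-volume progression before the sandbox can start: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A toy model of that ordering; the state names are inferred from this log's messages, not taken from kubelet source:

    package main

    import "fmt"

    // Toy model of the per-volume progression visible in the reconciler lines:
    // attach verified -> mount started -> SetUp succeeded.
    type volState int

    const (
        attachVerified volState = iota
        mountStarted
        setUpSucceeded
    )

    func advance(s volState) (volState, string) {
        switch s {
        case attachVerified:
            return mountStarted, "operationExecutor.MountVolume started"
        case mountStarted:
            return setUpSucceeded, "MountVolume.SetUp succeeded"
        default:
            return s, "volume ready; pod sandbox may start"
        }
    }

    func main() {
        for _, vol := range []string{"kube-api-access-k8bn8", "must-gather-output"} {
            s := attachVerified
            fmt.Printf("%s: operationExecutor.VerifyControllerAttachedVolume started\n", vol)
            for i := 0; i < 2; i++ {
                var msg string
                s, msg = advance(s)
                fmt.Printf("%s: %s\n", vol, msg)
            }
        }
    }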
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.363165 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kxfkv/must-gather-spnwq"
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.855058 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-kxfkv/must-gather-spnwq"]
Jan 21 16:41:03 crc kubenswrapper[5021]: I0121 16:41:03.872252 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 21 16:41:04 crc kubenswrapper[5021]: I0121 16:41:04.018868 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kxfkv/must-gather-spnwq" event={"ID":"95f8dbfd-2482-4331-ad0f-292ffec962e8","Type":"ContainerStarted","Data":"f5a54c008858bb4ccba9d021181dec1c0ae2e4644019e0fce7d0caf600465709"}
Jan 21 16:41:05 crc kubenswrapper[5021]: I0121 16:41:05.737394 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec"
Jan 21 16:41:05 crc kubenswrapper[5021]: E0121 16:41:05.738125 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:41:11 crc kubenswrapper[5021]: I0121 16:41:11.086114 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kxfkv/must-gather-spnwq" event={"ID":"95f8dbfd-2482-4331-ad0f-292ffec962e8","Type":"ContainerStarted","Data":"435d87bf05a4a87e645c9c0c9adcb2a4c2ecb020bf32e96e0edf39c1edff5826"}
Jan 21 16:41:12 crc kubenswrapper[5021]: I0121 16:41:12.094079 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kxfkv/must-gather-spnwq" event={"ID":"95f8dbfd-2482-4331-ad0f-292ffec962e8","Type":"ContainerStarted","Data":"bc44bdeaf4006764235cdf749abd11cad00b2606d84854fcf6c01fd7af7e9683"}
Jan 21 16:41:12 crc kubenswrapper[5021]: I0121 16:41:12.108765 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-kxfkv/must-gather-spnwq" podStartSLOduration=2.268897892 podStartE2EDuration="9.108744536s" podCreationTimestamp="2026-01-21 16:41:03 +0000 UTC" firstStartedPulling="2026-01-21 16:41:03.871731442 +0000 UTC m=+4605.406845331" lastFinishedPulling="2026-01-21 16:41:10.711578086 +0000 UTC m=+4612.246691975" observedRunningTime="2026-01-21 16:41:12.108157911 +0000 UTC m=+4613.643271800" watchObservedRunningTime="2026-01-21 16:41:12.108744536 +0000 UTC m=+4613.643858415"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.534310 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"]
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.545694 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"]
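The podStartSLOduration above is the end-to-end startup duration minus the image-pull window, which the tracker line's own numbers confirm: 9.108744536s - (m=+4612.246691975 - m=+4605.406845331) = 2.268897892s. A quick check in Go using the monotonic m=+ offsets printed in the log itself:

    package main

    import (
        "fmt"
        "time"
    )

    // Recompute podStartSLOduration for must-gather-spnwq from the tracker line:
    // SLO duration = E2E startup duration minus the image-pull window.
    // The m=+ offsets below are the monotonic readings printed in the log.
    func main() {
        e2e := 9108744536 * time.Nanosecond // podStartE2EDuration="9.108744536s"
        firstPull := 4605.406845331        // firstStartedPulling m=+4605.406845331
        lastPull := 4612.246691975         // lastFinishedPulling m=+4612.246691975
        pull := time.Duration((lastPull - firstPull) * float64(time.Second))
        fmt.Println("pull window:", pull)      // ~6.839846644s
        fmt.Println("SLO duration:", e2e-pull) // ~2.268897892s, matching the log
    }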
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.545745 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.618119 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.618174 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.618218 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zbnj\" (UniqueName: \"kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.719437 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.719481 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.719508 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zbnj\" (UniqueName: \"kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.719936 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.720068 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content\") pod \"redhat-operators-pfxxf\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf"
Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.751069 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zbnj\" (UniqueName: \"kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj\") pod \"redhat-operators-pfxxf\" (UID:
\"c1b866f0-335b-47c9-a051-39f9eddfc745\") " pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:13 crc kubenswrapper[5021]: I0121 16:41:13.873983 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:14 crc kubenswrapper[5021]: I0121 16:41:14.568005 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"] Jan 21 16:41:14 crc kubenswrapper[5021]: W0121 16:41:14.581977 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1b866f0_335b_47c9_a051_39f9eddfc745.slice/crio-bfa0971909c629ad7a67e38168b01fe1279f5fa2d09edbd33ddced96de2ab4a6 WatchSource:0}: Error finding container bfa0971909c629ad7a67e38168b01fe1279f5fa2d09edbd33ddced96de2ab4a6: Status 404 returned error can't find the container with id bfa0971909c629ad7a67e38168b01fe1279f5fa2d09edbd33ddced96de2ab4a6 Jan 21 16:41:15 crc kubenswrapper[5021]: I0121 16:41:15.123099 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerStarted","Data":"bfa0971909c629ad7a67e38168b01fe1279f5fa2d09edbd33ddced96de2ab4a6"} Jan 21 16:41:16 crc kubenswrapper[5021]: I0121 16:41:16.130399 5021 generic.go:334] "Generic (PLEG): container finished" podID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerID="668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5" exitCode=0 Jan 21 16:41:16 crc kubenswrapper[5021]: I0121 16:41:16.130449 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerDied","Data":"668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5"} Jan 21 16:41:18 crc kubenswrapper[5021]: I0121 16:41:18.150442 5021 generic.go:334] "Generic (PLEG): container finished" podID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerID="47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f" exitCode=0 Jan 21 16:41:18 crc kubenswrapper[5021]: I0121 16:41:18.150822 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerDied","Data":"47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f"} Jan 21 16:41:18 crc kubenswrapper[5021]: I0121 16:41:18.741111 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:41:18 crc kubenswrapper[5021]: E0121 16:41:18.741655 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:41:19 crc kubenswrapper[5021]: I0121 16:41:19.160605 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerStarted","Data":"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906"} Jan 21 16:41:19 crc kubenswrapper[5021]: I0121 16:41:19.181063 5021 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pfxxf" podStartSLOduration=3.737470135 podStartE2EDuration="6.181025373s" podCreationTimestamp="2026-01-21 16:41:13 +0000 UTC" firstStartedPulling="2026-01-21 16:41:16.132233892 +0000 UTC m=+4617.667347781" lastFinishedPulling="2026-01-21 16:41:18.57578913 +0000 UTC m=+4620.110903019" observedRunningTime="2026-01-21 16:41:19.176994853 +0000 UTC m=+4620.712108762" watchObservedRunningTime="2026-01-21 16:41:19.181025373 +0000 UTC m=+4620.716139262" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.485405 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-x4l62_724463d5-2779-4504-bbd1-4c12353a665c/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.537623 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-rkltx_33f53fd4-7cbc-4e1e-a72a-e48eee9ca274/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.550034 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-tjspf_6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.581397 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/extract/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.600918 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/util/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.609268 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/pull/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.697118 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-qswjh_8e5df137-5a39-434e-9ed8-cd984d3cfecb/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.708410 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-zl9lj_7645cfbe-28a4-4098-af64-3be341c2306f/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.720791 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-s6lsx_fcd8d00d-0a93-400d-8c23-eb51dbf56a35/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.963699 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-njwdd_696e3c0f-78c0-4517-8def-49fbe8728f48/manager/0.log" Jan 21 16:41:22 crc kubenswrapper[5021]: I0121 16:41:22.978536 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-2bt46_9fdec40b-ea8d-4d5e-82ac-27b0a76f450b/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.059929 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-wj9sz_c20dfe07-4e4b-44e0-a260-ff4958985c0c/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.070658 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-9bzn7_f9c1fef7-2823-4ebc-866a-adea991f6b5c/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.137032 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-tr25w_4180df4a-4632-4c29-b5cf-a597b93d4541/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.198598 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-jk57l_dd66a6c4-dee4-4079-ac1c-d838cc27f752/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.267721 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-xm9sm_569260c0-7874-41fa-9114-66643a79cdfe/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.279332 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-jgqdp_48921cb7-8983-4b8e-87cd-3316190ede3e/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.302230 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6_25709945-8415-492c-a829-fd79f3fbe521/manager/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.454980 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7f8fb8b79-5jnkm_70b7f82d-fa46-4ef3-b1f8-e790e3e4a540/operator/0.log" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.874679 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:23 crc kubenswrapper[5021]: I0121 16:41:23.874727 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.223017 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58495d798b-lxjdg_727fe8cb-51ad-433f-90e1-5998b948799a/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.231060 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-jwdkv_2c93cbb3-1ee5-47ca-a383-d460bf952648/registry-server/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.284232 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-kmmh6_0ca88d8a-abb8-498b-9588-376e3cc3a49e/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.316852 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-tcnr8_00e6bc09-d424-4ed1-b62a-b6fadc7416ec/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.332370 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-x6zvw_37d25098-ad0c-459e-b6e7-6b11d269606b/operator/0.log" Jan 21 16:41:24 crc 
kubenswrapper[5021]: I0121 16:41:24.363558 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-msh7h_e0c21887-edf1-4362-b212-456e024d2cd9/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.445959 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-96kl8_46f232dd-a469-4c74-b456-ba1b8f80b32a/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.457337 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-r274t_25190772-2e7e-4e99-9df6-727b970a7930/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.475943 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-p4nrl_794724a4-8705-4860-a126-6baefc733a24/manager/0.log" Jan 21 16:41:24 crc kubenswrapper[5021]: I0121 16:41:24.923621 5021 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pfxxf" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="registry-server" probeResult="failure" output=< Jan 21 16:41:24 crc kubenswrapper[5021]: timeout: failed to connect service ":50051" within 1s Jan 21 16:41:24 crc kubenswrapper[5021]: > Jan 21 16:41:30 crc kubenswrapper[5021]: I0121 16:41:30.738590 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:41:30 crc kubenswrapper[5021]: E0121 16:41:30.739590 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:41:32 crc kubenswrapper[5021]: I0121 16:41:32.038725 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-gglqm_1530383e-f6f4-47de-8302-dfe172a883e7/control-plane-machine-set-operator/0.log" Jan 21 16:41:32 crc kubenswrapper[5021]: I0121 16:41:32.054811 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-cgt27_a62ba26d-f037-478b-8dd1-47ffb968b8a6/kube-rbac-proxy/0.log" Jan 21 16:41:32 crc kubenswrapper[5021]: I0121 16:41:32.068101 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-cgt27_a62ba26d-f037-478b-8dd1-47ffb968b8a6/machine-api-operator/0.log" Jan 21 16:41:33 crc kubenswrapper[5021]: I0121 16:41:33.919766 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:33 crc kubenswrapper[5021]: I0121 16:41:33.964796 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:35 crc kubenswrapper[5021]: I0121 16:41:35.109037 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"] Jan 21 16:41:35 crc kubenswrapper[5021]: I0121 16:41:35.262470 5021 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-operators-pfxxf" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="registry-server" containerID="cri-o://02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906" gracePeriod=2 Jan 21 16:41:35 crc kubenswrapper[5021]: I0121 16:41:35.595182 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/controller/0.log" Jan 21 16:41:35 crc kubenswrapper[5021]: I0121 16:41:35.601591 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/kube-rbac-proxy/0.log" Jan 21 16:41:35 crc kubenswrapper[5021]: I0121 16:41:35.626222 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/controller/0.log" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.197467 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.232606 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities\") pod \"c1b866f0-335b-47c9-a051-39f9eddfc745\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.232679 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content\") pod \"c1b866f0-335b-47c9-a051-39f9eddfc745\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.232830 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zbnj\" (UniqueName: \"kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj\") pod \"c1b866f0-335b-47c9-a051-39f9eddfc745\" (UID: \"c1b866f0-335b-47c9-a051-39f9eddfc745\") " Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.233385 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities" (OuterVolumeSpecName: "utilities") pod "c1b866f0-335b-47c9-a051-39f9eddfc745" (UID: "c1b866f0-335b-47c9-a051-39f9eddfc745"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.238740 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj" (OuterVolumeSpecName: "kube-api-access-5zbnj") pod "c1b866f0-335b-47c9-a051-39f9eddfc745" (UID: "c1b866f0-335b-47c9-a051-39f9eddfc745"). InnerVolumeSpecName "kube-api-access-5zbnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.275175 5021 generic.go:334] "Generic (PLEG): container finished" podID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerID="02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906" exitCode=0 Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.275206 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pfxxf" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.275237 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerDied","Data":"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906"} Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.275304 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfxxf" event={"ID":"c1b866f0-335b-47c9-a051-39f9eddfc745","Type":"ContainerDied","Data":"bfa0971909c629ad7a67e38168b01fe1279f5fa2d09edbd33ddced96de2ab4a6"} Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.275357 5021 scope.go:117] "RemoveContainer" containerID="02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.302160 5021 scope.go:117] "RemoveContainer" containerID="47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.323952 5021 scope.go:117] "RemoveContainer" containerID="668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.334073 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zbnj\" (UniqueName: \"kubernetes.io/projected/c1b866f0-335b-47c9-a051-39f9eddfc745-kube-api-access-5zbnj\") on node \"crc\" DevicePath \"\"" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.334258 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.351706 5021 scope.go:117] "RemoveContainer" containerID="02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906" Jan 21 16:41:36 crc kubenswrapper[5021]: E0121 16:41:36.352351 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906\": container with ID starting with 02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906 not found: ID does not exist" containerID="02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.352402 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906"} err="failed to get container status \"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906\": rpc error: code = NotFound desc = could not find container \"02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906\": container with ID starting with 02d1a40fbf1c23e8420ddc04c86b96d0bd87b04043e86fea85f2b9f5e6596906 not found: ID does not exist" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.352436 5021 scope.go:117] "RemoveContainer" containerID="47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f" Jan 21 16:41:36 crc kubenswrapper[5021]: E0121 16:41:36.352704 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f\": container with ID starting with 
47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f not found: ID does not exist" containerID="47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.352722 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f"} err="failed to get container status \"47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f\": rpc error: code = NotFound desc = could not find container \"47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f\": container with ID starting with 47b4caa3cd78b70fa58802aba81899c3adee2a2d880ede6d4024a2a3332b194f not found: ID does not exist" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.352738 5021 scope.go:117] "RemoveContainer" containerID="668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5" Jan 21 16:41:36 crc kubenswrapper[5021]: E0121 16:41:36.353067 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5\": container with ID starting with 668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5 not found: ID does not exist" containerID="668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.353113 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5"} err="failed to get container status \"668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5\": rpc error: code = NotFound desc = could not find container \"668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5\": container with ID starting with 668c09d5168634c487b6c50d19306a5c0b47ec2ff7eb2e0ea3e7c0fd28bd34c5 not found: ID does not exist" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.379678 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1b866f0-335b-47c9-a051-39f9eddfc745" (UID: "c1b866f0-335b-47c9-a051-39f9eddfc745"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.436217 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1b866f0-335b-47c9-a051-39f9eddfc745-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.614089 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"] Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.624483 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pfxxf"] Jan 21 16:41:36 crc kubenswrapper[5021]: I0121 16:41:36.751026 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" path="/var/lib/kubelet/pods/c1b866f0-335b-47c9-a051-39f9eddfc745/volumes" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.419541 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.428041 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/reloader/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.432438 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr-metrics/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.440169 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.446229 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy-frr/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.452879 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-frr-files/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.457708 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-reloader/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.464463 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-metrics/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.475398 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-cctjf_64c81441-7b12-4e6d-9c22-08692b3e61e2/frr-k8s-webhook-server/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.504197 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5c75d9c54f-p7sb6_9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed/manager/0.log" Jan 21 16:41:37 crc kubenswrapper[5021]: I0121 16:41:37.513860 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7449c97c5b-q6gvc_3776b291-b33a-4d8e-bab3-cdd3a4f346e2/webhook-server/0.log" Jan 21 16:41:38 crc kubenswrapper[5021]: I0121 16:41:38.167315 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/speaker/0.log" Jan 21 16:41:38 crc kubenswrapper[5021]: I0121 16:41:38.172824 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/kube-rbac-proxy/0.log" Jan 21 16:41:40 crc kubenswrapper[5021]: I0121 16:41:40.708064 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-sctx7_4e8ca7a2-523e-4c60-8032-020eed1d3acc/cert-manager-controller/0.log" Jan 21 16:41:40 crc kubenswrapper[5021]: I0121 16:41:40.727722 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-ds76b_6fc31342-f27a-4bc1-8121-a283abb689fa/cert-manager-cainjector/0.log" Jan 21 16:41:40 crc kubenswrapper[5021]: I0121 16:41:40.736875 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-chdrq_f1b5f0fa-4a66-443b-9178-8f51aee84d1f/cert-manager-webhook/0.log" Jan 21 16:41:44 crc kubenswrapper[5021]: I0121 16:41:44.738470 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:41:44 crc kubenswrapper[5021]: E0121 16:41:44.739242 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.413043 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-g5kll_a174b542-9e00-432f-bb4a-62bb0118d792/nmstate-console-plugin/0.log" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.436561 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-5mp2z_28848e86-4658-438b-8334-5cc756cab803/nmstate-handler/0.log" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.453652 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-xdczs_2b5cda33-cd29-42cc-a1ba-ef98996815d7/nmstate-metrics/0.log" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.461370 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-xdczs_2b5cda33-cd29-42cc-a1ba-ef98996815d7/kube-rbac-proxy/0.log" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.478198 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-ppc7l_0db957d4-1bed-4f2b-ac99-f64e313dc52b/nmstate-operator/0.log" Jan 21 16:41:45 crc kubenswrapper[5021]: I0121 16:41:45.488945 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-b8kws_4eeb4ade-bc6a-499d-b300-7e4feae201cb/nmstate-webhook/0.log" Jan 21 16:41:55 crc kubenswrapper[5021]: I0121 16:41:55.717560 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/controller/0.log" Jan 21 16:41:55 crc kubenswrapper[5021]: I0121 16:41:55.730460 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/kube-rbac-proxy/0.log" Jan 21 16:41:55 crc kubenswrapper[5021]: I0121 16:41:55.754652 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/controller/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.116386 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.127401 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/reloader/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.133515 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr-metrics/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.141896 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.158117 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy-frr/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.167431 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-frr-files/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.176438 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-reloader/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.185565 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-metrics/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.198036 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-cctjf_64c81441-7b12-4e6d-9c22-08692b3e61e2/frr-k8s-webhook-server/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.224385 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5c75d9c54f-p7sb6_9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed/manager/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.239409 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7449c97c5b-q6gvc_3776b291-b33a-4d8e-bab3-cdd3a4f346e2/webhook-server/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.562017 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/speaker/0.log" Jan 21 16:41:57 crc kubenswrapper[5021]: I0121 16:41:57.581637 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/kube-rbac-proxy/0.log" Jan 21 16:41:59 crc kubenswrapper[5021]: I0121 16:41:59.737604 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:41:59 crc kubenswrapper[5021]: E0121 16:41:59.738108 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.087165 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4_d71db597-eb63-47d5-a55f-f3951b03ff6f/extract/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.095436 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4_d71db597-eb63-47d5-a55f-f3951b03ff6f/util/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.147667 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931afkpt4_d71db597-eb63-47d5-a55f-f3951b03ff6f/pull/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.165016 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c_55ddc891-1df3-4fbb-8491-48d0bebf2b65/extract/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.172755 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c_55ddc891-1df3-4fbb-8491-48d0bebf2b65/util/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.180981 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7t22c_55ddc891-1df3-4fbb-8491-48d0bebf2b65/pull/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.191214 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r_483d13df-3d67-4110-a5a0-7c6d4fde373f/extract/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.199833 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r_483d13df-3d67-4110-a5a0-7c6d4fde373f/util/0.log" Jan 21 16:42:01 crc kubenswrapper[5021]: I0121 16:42:01.211008 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713q868r_483d13df-3d67-4110-a5a0-7c6d4fde373f/pull/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.009479 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vl5k6_9520cbac-26ed-42dc-a4f7-b2cbd670e722/registry-server/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.014752 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vl5k6_9520cbac-26ed-42dc-a4f7-b2cbd670e722/extract-utilities/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.022471 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vl5k6_9520cbac-26ed-42dc-a4f7-b2cbd670e722/extract-content/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.881548 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-zxtgj_f987e070-06b8-47d4-bc4a-441649d5d9e9/registry-server/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.887639 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zxtgj_f987e070-06b8-47d4-bc4a-441649d5d9e9/extract-utilities/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.896521 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zxtgj_f987e070-06b8-47d4-bc4a-441649d5d9e9/extract-content/0.log" Jan 21 16:42:02 crc kubenswrapper[5021]: I0121 16:42:02.913410 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-w2pss_3139b142-599c-4da3-9a0d-5facd5ca28cc/marketplace-operator/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.113943 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qbdg4_da685ff8-e5ae-4316-914d-690f55d41325/registry-server/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.119127 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qbdg4_da685ff8-e5ae-4316-914d-690f55d41325/extract-utilities/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.127902 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qbdg4_da685ff8-e5ae-4316-914d-690f55d41325/extract-content/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.913144 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xgz8b_b45643f8-e7b2-4c4f-8da1-b4e753886a05/registry-server/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.919428 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xgz8b_b45643f8-e7b2-4c4f-8da1-b4e753886a05/extract-utilities/0.log" Jan 21 16:42:03 crc kubenswrapper[5021]: I0121 16:42:03.927559 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xgz8b_b45643f8-e7b2-4c4f-8da1-b4e753886a05/extract-content/0.log" Jan 21 16:42:13 crc kubenswrapper[5021]: I0121 16:42:13.737243 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:42:13 crc kubenswrapper[5021]: E0121 16:42:13.738145 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:42:25 crc kubenswrapper[5021]: I0121 16:42:25.737636 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:42:25 crc kubenswrapper[5021]: E0121 16:42:25.739610 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" 
podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:42:36 crc kubenswrapper[5021]: I0121 16:42:36.738994 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:42:36 crc kubenswrapper[5021]: E0121 16:42:36.739844 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.375614 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-sctx7_4e8ca7a2-523e-4c60-8032-020eed1d3acc/cert-manager-controller/0.log" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.397462 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-ds76b_6fc31342-f27a-4bc1-8121-a283abb689fa/cert-manager-cainjector/0.log" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.407419 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-chdrq_f1b5f0fa-4a66-443b-9178-8f51aee84d1f/cert-manager-webhook/0.log" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.504180 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/controller/0.log" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.511884 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7k5j6_1ed1517c-6c3c-48a1-8caf-aac692b8b088/kube-rbac-proxy/0.log" Jan 21 16:42:43 crc kubenswrapper[5021]: I0121 16:42:43.532699 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/controller/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.387604 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-x4l62_724463d5-2779-4504-bbd1-4c12353a665c/manager/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.441837 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-rkltx_33f53fd4-7cbc-4e1e-a72a-e48eee9ca274/manager/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.461129 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-tjspf_6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0/manager/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.477205 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/extract/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.483839 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/util/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.492804 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/pull/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.631788 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-qswjh_8e5df137-5a39-434e-9ed8-cd984d3cfecb/manager/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.680026 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-zl9lj_7645cfbe-28a4-4098-af64-3be341c2306f/manager/0.log" Jan 21 16:42:44 crc kubenswrapper[5021]: I0121 16:42:44.694164 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-s6lsx_fcd8d00d-0a93-400d-8c23-eb51dbf56a35/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.065322 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-njwdd_696e3c0f-78c0-4517-8def-49fbe8728f48/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.085965 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-2bt46_9fdec40b-ea8d-4d5e-82ac-27b0a76f450b/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.183374 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-wj9sz_c20dfe07-4e4b-44e0-a260-ff4958985c0c/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.213530 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-9bzn7_f9c1fef7-2823-4ebc-866a-adea991f6b5c/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.249372 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.260126 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/reloader/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.268968 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/frr-metrics/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.275330 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-tr25w_4180df4a-4632-4c29-b5cf-a597b93d4541/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.276422 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.294655 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/kube-rbac-proxy-frr/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.312086 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-frr-files/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.320472 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-reloader/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.320656 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-jk57l_dd66a6c4-dee4-4079-ac1c-d838cc27f752/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.329213 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ld56k_97b76490-a49c-4ddc-b6ba-7fbda7094851/cp-metrics/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.343173 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-cctjf_64c81441-7b12-4e6d-9c22-08692b3e61e2/frr-k8s-webhook-server/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.389307 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5c75d9c54f-p7sb6_9a27e7e2-ee2e-4d6f-b0da-c814ebc2b1ed/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.403625 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7449c97c5b-q6gvc_3776b291-b33a-4d8e-bab3-cdd3a4f346e2/webhook-server/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.440057 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-xm9sm_569260c0-7874-41fa-9114-66643a79cdfe/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.458004 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-jgqdp_48921cb7-8983-4b8e-87cd-3316190ede3e/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.492331 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6_25709945-8415-492c-a829-fd79f3fbe521/manager/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.754012 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7f8fb8b79-5jnkm_70b7f82d-fa46-4ef3-b1f8-e790e3e4a540/operator/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.870059 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/speaker/0.log" Jan 21 16:42:45 crc kubenswrapper[5021]: I0121 16:42:45.894065 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-zpz26_81ddebd8-1787-4b73-a670-11b1d0686123/kube-rbac-proxy/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.519547 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58495d798b-lxjdg_727fe8cb-51ad-433f-90e1-5998b948799a/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.700801 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-jwdkv_2c93cbb3-1ee5-47ca-a383-d460bf952648/registry-server/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.754447 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-kmmh6_0ca88d8a-abb8-498b-9588-376e3cc3a49e/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.790256 
5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-tcnr8_00e6bc09-d424-4ed1-b62a-b6fadc7416ec/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.806291 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-x6zvw_37d25098-ad0c-459e-b6e7-6b11d269606b/operator/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.844565 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-msh7h_e0c21887-edf1-4362-b212-456e024d2cd9/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.931696 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-96kl8_46f232dd-a469-4c74-b456-ba1b8f80b32a/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.934093 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-sctx7_4e8ca7a2-523e-4c60-8032-020eed1d3acc/cert-manager-controller/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.943207 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-r274t_25190772-2e7e-4e99-9df6-727b970a7930/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.959215 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-ds76b_6fc31342-f27a-4bc1-8121-a283abb689fa/cert-manager-cainjector/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.959461 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-p4nrl_794724a4-8705-4860-a126-6baefc733a24/manager/0.log" Jan 21 16:42:46 crc kubenswrapper[5021]: I0121 16:42:46.970787 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-chdrq_f1b5f0fa-4a66-443b-9178-8f51aee84d1f/cert-manager-webhook/0.log" Jan 21 16:42:47 crc kubenswrapper[5021]: I0121 16:42:47.595313 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-gglqm_1530383e-f6f4-47de-8302-dfe172a883e7/control-plane-machine-set-operator/0.log" Jan 21 16:42:47 crc kubenswrapper[5021]: I0121 16:42:47.611235 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-cgt27_a62ba26d-f037-478b-8dd1-47ffb968b8a6/kube-rbac-proxy/0.log" Jan 21 16:42:47 crc kubenswrapper[5021]: I0121 16:42:47.625582 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-cgt27_a62ba26d-f037-478b-8dd1-47ffb968b8a6/machine-api-operator/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.271038 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-g5kll_a174b542-9e00-432f-bb4a-62bb0118d792/nmstate-console-plugin/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.287270 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-5mp2z_28848e86-4658-438b-8334-5cc756cab803/nmstate-handler/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.302636 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-xdczs_2b5cda33-cd29-42cc-a1ba-ef98996815d7/nmstate-metrics/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.313283 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-xdczs_2b5cda33-cd29-42cc-a1ba-ef98996815d7/kube-rbac-proxy/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.326418 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-ppc7l_0db957d4-1bed-4f2b-ac99-f64e313dc52b/nmstate-operator/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.335892 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-b8kws_4eeb4ade-bc6a-499d-b300-7e4feae201cb/nmstate-webhook/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.499205 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-x4l62_724463d5-2779-4504-bbd1-4c12353a665c/manager/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.544954 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-rkltx_33f53fd4-7cbc-4e1e-a72a-e48eee9ca274/manager/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.558371 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-tjspf_6d2f5c7c-f0d7-405b-b9cf-427ea840f7c0/manager/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.572195 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/extract/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.584663 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/util/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.594779 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f9475b8e0dbd19b900b29a99cbbde633fbf853f7ac56ad0f8ef85c6293rvznz_b61e6e25-934e-4eb5-ba83-7aca994252fc/pull/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.698898 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-qswjh_8e5df137-5a39-434e-9ed8-cd984d3cfecb/manager/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.710258 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-zl9lj_7645cfbe-28a4-4098-af64-3be341c2306f/manager/0.log" Jan 21 16:42:48 crc kubenswrapper[5021]: I0121 16:42:48.726193 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-s6lsx_fcd8d00d-0a93-400d-8c23-eb51dbf56a35/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.003540 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-njwdd_696e3c0f-78c0-4517-8def-49fbe8728f48/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.020093 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-2bt46_9fdec40b-ea8d-4d5e-82ac-27b0a76f450b/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.102789 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-wj9sz_c20dfe07-4e4b-44e0-a260-ff4958985c0c/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.114534 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-9bzn7_f9c1fef7-2823-4ebc-866a-adea991f6b5c/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.169180 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-tr25w_4180df4a-4632-4c29-b5cf-a597b93d4541/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.234117 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-jk57l_dd66a6c4-dee4-4079-ac1c-d838cc27f752/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.347221 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-xm9sm_569260c0-7874-41fa-9114-66643a79cdfe/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.358561 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-jgqdp_48921cb7-8983-4b8e-87cd-3316190ede3e/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.373741 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854sp4g6_25709945-8415-492c-a829-fd79f3fbe521/manager/0.log" Jan 21 16:42:49 crc kubenswrapper[5021]: I0121 16:42:49.561800 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-7f8fb8b79-5jnkm_70b7f82d-fa46-4ef3-b1f8-e790e3e4a540/operator/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.539150 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-58495d798b-lxjdg_727fe8cb-51ad-433f-90e1-5998b948799a/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.644268 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-jwdkv_2c93cbb3-1ee5-47ca-a383-d460bf952648/registry-server/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.698165 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-kmmh6_0ca88d8a-abb8-498b-9588-376e3cc3a49e/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.729083 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-tcnr8_00e6bc09-d424-4ed1-b62a-b6fadc7416ec/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.738241 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.744297 5021 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-x6zvw_37d25098-ad0c-459e-b6e7-6b11d269606b/operator/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.785804 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-msh7h_e0c21887-edf1-4362-b212-456e024d2cd9/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.873612 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-96kl8_46f232dd-a469-4c74-b456-ba1b8f80b32a/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.884589 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-r274t_25190772-2e7e-4e99-9df6-727b970a7930/manager/0.log" Jan 21 16:42:50 crc kubenswrapper[5021]: I0121 16:42:50.904365 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-p4nrl_794724a4-8705-4860-a126-6baefc733a24/manager/0.log" Jan 21 16:42:51 crc kubenswrapper[5021]: I0121 16:42:51.841052 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5"} Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.852169 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/kube-multus-additional-cni-plugins/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.860076 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/egress-router-binary-copy/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.866093 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/cni-plugins/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.874390 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/bond-cni-plugin/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.886603 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/routeoverride-cni/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.894171 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/whereabouts-cni-bincopy/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.900780 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-k9hxg_ddf892f9-a048-4335-995e-de581763d230/whereabouts-cni/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.923829 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-gm5rm_0654cecd-38e1-4678-9452-5e8b8b1dd07f/multus-admission-controller/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.928629 5021 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-gm5rm_0654cecd-38e1-4678-9452-5e8b8b1dd07f/kube-rbac-proxy/0.log" Jan 21 16:42:52 crc kubenswrapper[5021]: I0121 16:42:52.969683 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/2.log" Jan 21 16:42:53 crc kubenswrapper[5021]: I0121 16:42:53.021018 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-sd7j9_49b7ceaa-55d0-4bb2-8ff2-ee9da865da0a/kube-multus/3.log" Jan 21 16:42:53 crc kubenswrapper[5021]: I0121 16:42:53.055042 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-xtd2p_cb60592c-6770-457b-b2ae-2c6c8f2a4149/network-metrics-daemon/0.log" Jan 21 16:42:53 crc kubenswrapper[5021]: I0121 16:42:53.060810 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-xtd2p_cb60592c-6770-457b-b2ae-2c6c8f2a4149/kube-rbac-proxy/0.log" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.629008 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:42:59 crc kubenswrapper[5021]: E0121 16:42:59.630194 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="registry-server" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.630227 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="registry-server" Jan 21 16:42:59 crc kubenswrapper[5021]: E0121 16:42:59.630266 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="extract-content" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.630283 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="extract-content" Jan 21 16:42:59 crc kubenswrapper[5021]: E0121 16:42:59.630311 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="extract-utilities" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.630328 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="extract-utilities" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.630666 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1b866f0-335b-47c9-a051-39f9eddfc745" containerName="registry-server" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.636220 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.637947 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.752604 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.752698 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkm6g\" (UniqueName: \"kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.752759 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.854004 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.854170 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkm6g\" (UniqueName: \"kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.854271 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.855118 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.856099 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities\") pod \"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.875887 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkm6g\" (UniqueName: \"kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g\") pod 
\"redhat-marketplace-6qvpz\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:42:59 crc kubenswrapper[5021]: I0121 16:42:59.967821 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:00 crc kubenswrapper[5021]: I0121 16:43:00.403452 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:43:00 crc kubenswrapper[5021]: I0121 16:43:00.900423 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerStarted","Data":"c759fa15377107550fcb8bb1f5c09b97ab11d9ffe8e477fe623cfd73cfc5df6a"} Jan 21 16:43:01 crc kubenswrapper[5021]: I0121 16:43:01.908773 5021 generic.go:334] "Generic (PLEG): container finished" podID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerID="94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960" exitCode=0 Jan 21 16:43:01 crc kubenswrapper[5021]: I0121 16:43:01.908846 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerDied","Data":"94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960"} Jan 21 16:43:02 crc kubenswrapper[5021]: I0121 16:43:02.943855 5021 generic.go:334] "Generic (PLEG): container finished" podID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerID="a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce" exitCode=0 Jan 21 16:43:02 crc kubenswrapper[5021]: I0121 16:43:02.943998 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerDied","Data":"a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce"} Jan 21 16:43:03 crc kubenswrapper[5021]: I0121 16:43:03.955343 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerStarted","Data":"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b"} Jan 21 16:43:03 crc kubenswrapper[5021]: I0121 16:43:03.976603 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6qvpz" podStartSLOduration=3.479188381 podStartE2EDuration="4.976580735s" podCreationTimestamp="2026-01-21 16:42:59 +0000 UTC" firstStartedPulling="2026-01-21 16:43:01.910446134 +0000 UTC m=+4723.445560023" lastFinishedPulling="2026-01-21 16:43:03.407838498 +0000 UTC m=+4724.942952377" observedRunningTime="2026-01-21 16:43:03.971300852 +0000 UTC m=+4725.506414741" watchObservedRunningTime="2026-01-21 16:43:03.976580735 +0000 UTC m=+4725.511694624" Jan 21 16:43:09 crc kubenswrapper[5021]: I0121 16:43:09.969004 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:09 crc kubenswrapper[5021]: I0121 16:43:09.970537 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:10 crc kubenswrapper[5021]: I0121 16:43:10.013772 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:11 crc kubenswrapper[5021]: I0121 
16:43:11.057109 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:11 crc kubenswrapper[5021]: I0121 16:43:11.099244 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.032011 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6qvpz" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="registry-server" containerID="cri-o://c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b" gracePeriod=2 Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.466605 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.651572 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities\") pod \"b6903209-5819-4f55-9c74-d174a30c4f3d\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.652617 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities" (OuterVolumeSpecName: "utilities") pod "b6903209-5819-4f55-9c74-d174a30c4f3d" (UID: "b6903209-5819-4f55-9c74-d174a30c4f3d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.653813 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkm6g\" (UniqueName: \"kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g\") pod \"b6903209-5819-4f55-9c74-d174a30c4f3d\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.654738 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content\") pod \"b6903209-5819-4f55-9c74-d174a30c4f3d\" (UID: \"b6903209-5819-4f55-9c74-d174a30c4f3d\") " Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.655391 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.678201 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g" (OuterVolumeSpecName: "kube-api-access-rkm6g") pod "b6903209-5819-4f55-9c74-d174a30c4f3d" (UID: "b6903209-5819-4f55-9c74-d174a30c4f3d"). InnerVolumeSpecName "kube-api-access-rkm6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.690304 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b6903209-5819-4f55-9c74-d174a30c4f3d" (UID: "b6903209-5819-4f55-9c74-d174a30c4f3d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.759281 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkm6g\" (UniqueName: \"kubernetes.io/projected/b6903209-5819-4f55-9c74-d174a30c4f3d-kube-api-access-rkm6g\") on node \"crc\" DevicePath \"\"" Jan 21 16:43:13 crc kubenswrapper[5021]: I0121 16:43:13.759324 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6903209-5819-4f55-9c74-d174a30c4f3d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.040892 5021 generic.go:334] "Generic (PLEG): container finished" podID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerID="c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b" exitCode=0 Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.040962 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerDied","Data":"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b"} Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.040991 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qvpz" event={"ID":"b6903209-5819-4f55-9c74-d174a30c4f3d","Type":"ContainerDied","Data":"c759fa15377107550fcb8bb1f5c09b97ab11d9ffe8e477fe623cfd73cfc5df6a"} Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.041009 5021 scope.go:117] "RemoveContainer" containerID="c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.041110 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qvpz" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.066347 5021 scope.go:117] "RemoveContainer" containerID="a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.084584 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.090773 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qvpz"] Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.093367 5021 scope.go:117] "RemoveContainer" containerID="94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.109061 5021 scope.go:117] "RemoveContainer" containerID="c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b" Jan 21 16:43:14 crc kubenswrapper[5021]: E0121 16:43:14.109550 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b\": container with ID starting with c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b not found: ID does not exist" containerID="c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.109604 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b"} err="failed to get container status \"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b\": rpc error: code = NotFound desc = could not find container \"c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b\": container with ID starting with c203a9d16d673e21663af6bca0b06610af4f8b848787013e72a50e443c0b2f4b not found: ID does not exist" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.109641 5021 scope.go:117] "RemoveContainer" containerID="a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce" Jan 21 16:43:14 crc kubenswrapper[5021]: E0121 16:43:14.110091 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce\": container with ID starting with a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce not found: ID does not exist" containerID="a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.110122 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce"} err="failed to get container status \"a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce\": rpc error: code = NotFound desc = could not find container \"a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce\": container with ID starting with a4091fc96f533f3d784bbf339a2c20959f0c3d202db1e0aa4a71501584cd39ce not found: ID does not exist" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.110148 5021 scope.go:117] "RemoveContainer" containerID="94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960" Jan 21 16:43:14 crc kubenswrapper[5021]: E0121 16:43:14.110586 5021 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960\": container with ID starting with 94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960 not found: ID does not exist" containerID="94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.110621 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960"} err="failed to get container status \"94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960\": rpc error: code = NotFound desc = could not find container \"94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960\": container with ID starting with 94450846bb54ba1b404f1994c814f8a6c7b55a11628a60d7aa7b030766804960 not found: ID does not exist" Jan 21 16:43:14 crc kubenswrapper[5021]: I0121 16:43:14.747685 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" path="/var/lib/kubelet/pods/b6903209-5819-4f55-9c74-d174a30c4f3d/volumes" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.815057 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:33 crc kubenswrapper[5021]: E0121 16:44:33.817219 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="registry-server" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.817325 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="registry-server" Jan 21 16:44:33 crc kubenswrapper[5021]: E0121 16:44:33.817397 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="extract-content" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.817461 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="extract-content" Jan 21 16:44:33 crc kubenswrapper[5021]: E0121 16:44:33.817515 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="extract-utilities" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.817571 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="extract-utilities" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.817760 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6903209-5819-4f55-9c74-d174a30c4f3d" containerName="registry-server" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.819070 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.828833 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.977709 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.978359 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:33 crc kubenswrapper[5021]: I0121 16:44:33.978510 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j2qg\" (UniqueName: \"kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.079882 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.080295 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j2qg\" (UniqueName: \"kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.080454 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.081390 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.081508 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.101504 5021 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5j2qg\" (UniqueName: \"kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg\") pod \"community-operators-ffrdk\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.145986 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.686996 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.867740 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerStarted","Data":"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc"} Jan 21 16:44:34 crc kubenswrapper[5021]: I0121 16:44:34.868554 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerStarted","Data":"2bc430c2ba92bb8762339d33d742fe461f79a2d21f5fe8469186026de806b2e6"} Jan 21 16:44:35 crc kubenswrapper[5021]: I0121 16:44:35.878543 5021 generic.go:334] "Generic (PLEG): container finished" podID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerID="f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc" exitCode=0 Jan 21 16:44:35 crc kubenswrapper[5021]: I0121 16:44:35.878872 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerDied","Data":"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc"} Jan 21 16:44:36 crc kubenswrapper[5021]: I0121 16:44:36.887478 5021 generic.go:334] "Generic (PLEG): container finished" podID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerID="eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48" exitCode=0 Jan 21 16:44:36 crc kubenswrapper[5021]: I0121 16:44:36.887551 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerDied","Data":"eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48"} Jan 21 16:44:37 crc kubenswrapper[5021]: I0121 16:44:37.898115 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerStarted","Data":"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20"} Jan 21 16:44:44 crc kubenswrapper[5021]: I0121 16:44:44.146651 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:44 crc kubenswrapper[5021]: I0121 16:44:44.147262 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:44 crc kubenswrapper[5021]: I0121 16:44:44.189635 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:44 crc kubenswrapper[5021]: I0121 16:44:44.211125 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-ffrdk" podStartSLOduration=9.806695069 podStartE2EDuration="11.211103587s" podCreationTimestamp="2026-01-21 16:44:33 +0000 UTC" firstStartedPulling="2026-01-21 16:44:35.884866534 +0000 UTC m=+4817.419980433" lastFinishedPulling="2026-01-21 16:44:37.289275062 +0000 UTC m=+4818.824388951" observedRunningTime="2026-01-21 16:44:37.934297736 +0000 UTC m=+4819.469411625" watchObservedRunningTime="2026-01-21 16:44:44.211103587 +0000 UTC m=+4825.746217476" Jan 21 16:44:44 crc kubenswrapper[5021]: I0121 16:44:44.983852 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:45 crc kubenswrapper[5021]: I0121 16:44:45.026962 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:46 crc kubenswrapper[5021]: I0121 16:44:46.954346 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ffrdk" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="registry-server" containerID="cri-o://2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20" gracePeriod=2 Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.840831 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.970587 5021 generic.go:334] "Generic (PLEG): container finished" podID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerID="2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20" exitCode=0 Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.970637 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerDied","Data":"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20"} Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.970681 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffrdk" event={"ID":"a82e1673-b572-4d15-a2f7-89b4ada9e5a1","Type":"ContainerDied","Data":"2bc430c2ba92bb8762339d33d742fe461f79a2d21f5fe8469186026de806b2e6"} Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.970703 5021 scope.go:117] "RemoveContainer" containerID="2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20" Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.970716 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ffrdk" Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.987478 5021 scope.go:117] "RemoveContainer" containerID="eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48" Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.993596 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities\") pod \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.993710 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content\") pod \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.993798 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j2qg\" (UniqueName: \"kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg\") pod \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\" (UID: \"a82e1673-b572-4d15-a2f7-89b4ada9e5a1\") " Jan 21 16:44:47 crc kubenswrapper[5021]: I0121 16:44:47.995011 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities" (OuterVolumeSpecName: "utilities") pod "a82e1673-b572-4d15-a2f7-89b4ada9e5a1" (UID: "a82e1673-b572-4d15-a2f7-89b4ada9e5a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.020096 5021 scope.go:117] "RemoveContainer" containerID="f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.021021 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg" (OuterVolumeSpecName: "kube-api-access-5j2qg") pod "a82e1673-b572-4d15-a2f7-89b4ada9e5a1" (UID: "a82e1673-b572-4d15-a2f7-89b4ada9e5a1"). InnerVolumeSpecName "kube-api-access-5j2qg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.037510 5021 scope.go:117] "RemoveContainer" containerID="2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20" Jan 21 16:44:48 crc kubenswrapper[5021]: E0121 16:44:48.038038 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20\": container with ID starting with 2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20 not found: ID does not exist" containerID="2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.038153 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20"} err="failed to get container status \"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20\": rpc error: code = NotFound desc = could not find container \"2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20\": container with ID starting with 2a455e55286e207b7df0d1e8bd5a26233405f3e7b42aa7ab753205b6a0d25a20 not found: ID does not exist" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.038261 5021 scope.go:117] "RemoveContainer" containerID="eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48" Jan 21 16:44:48 crc kubenswrapper[5021]: E0121 16:44:48.038756 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48\": container with ID starting with eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48 not found: ID does not exist" containerID="eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.038790 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48"} err="failed to get container status \"eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48\": rpc error: code = NotFound desc = could not find container \"eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48\": container with ID starting with eb3d0b3d716d3f969919552b3b7817d30dcc2ba826b5332d71e945b21f801f48 not found: ID does not exist" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.038808 5021 scope.go:117] "RemoveContainer" containerID="f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc" Jan 21 16:44:48 crc kubenswrapper[5021]: E0121 16:44:48.039192 5021 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc\": container with ID starting with f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc not found: ID does not exist" containerID="f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.039347 5021 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc"} err="failed to get container status \"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc\": rpc error: code = NotFound desc = could not 
find container \"f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc\": container with ID starting with f43c3bfaef274542968a5df3cb22d3ef9cdbfc7087b00a4ff631cfa7943f8ccc not found: ID does not exist" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.066637 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a82e1673-b572-4d15-a2f7-89b4ada9e5a1" (UID: "a82e1673-b572-4d15-a2f7-89b4ada9e5a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.096031 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j2qg\" (UniqueName: \"kubernetes.io/projected/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-kube-api-access-5j2qg\") on node \"crc\" DevicePath \"\"" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.096073 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.096085 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a82e1673-b572-4d15-a2f7-89b4ada9e5a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.303654 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.309701 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ffrdk"] Jan 21 16:44:48 crc kubenswrapper[5021]: I0121 16:44:48.747076 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" path="/var/lib/kubelet/pods/a82e1673-b572-4d15-a2f7-89b4ada9e5a1/volumes" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.146471 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28"] Jan 21 16:45:00 crc kubenswrapper[5021]: E0121 16:45:00.147391 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="registry-server" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.147410 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="registry-server" Jan 21 16:45:00 crc kubenswrapper[5021]: E0121 16:45:00.147442 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="extract-content" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.147449 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="extract-content" Jan 21 16:45:00 crc kubenswrapper[5021]: E0121 16:45:00.147465 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="extract-utilities" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.147474 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="extract-utilities" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.147649 5021 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="a82e1673-b572-4d15-a2f7-89b4ada9e5a1" containerName="registry-server" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.148205 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.152979 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.153387 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.157587 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28"] Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.304445 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.304569 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8429h\" (UniqueName: \"kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.304601 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.405507 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.405626 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8429h\" (UniqueName: \"kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.405658 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc 
kubenswrapper[5021]: I0121 16:45:00.406800 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.417636 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.427696 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8429h\" (UniqueName: \"kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h\") pod \"collect-profiles-29483565-wdj28\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:00 crc kubenswrapper[5021]: I0121 16:45:00.473790 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:01 crc kubenswrapper[5021]: I0121 16:45:01.082443 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28"] Jan 21 16:45:02 crc kubenswrapper[5021]: I0121 16:45:02.096599 5021 generic.go:334] "Generic (PLEG): container finished" podID="3d4fe53d-867d-47e4-a3df-0729580b276d" containerID="fa0ed8a4a676ce368d9569799ce94202cfe315dda8ac9e018246bd35ece40e66" exitCode=0 Jan 21 16:45:02 crc kubenswrapper[5021]: I0121 16:45:02.096693 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" event={"ID":"3d4fe53d-867d-47e4-a3df-0729580b276d","Type":"ContainerDied","Data":"fa0ed8a4a676ce368d9569799ce94202cfe315dda8ac9e018246bd35ece40e66"} Jan 21 16:45:02 crc kubenswrapper[5021]: I0121 16:45:02.096746 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" event={"ID":"3d4fe53d-867d-47e4-a3df-0729580b276d","Type":"ContainerStarted","Data":"a61da6beb3da5751c97d39e24d6f3d8a47e2c7d065be0b827973ad57a8890d2e"} Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.348423 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.458059 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume\") pod \"3d4fe53d-867d-47e4-a3df-0729580b276d\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.458116 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume\") pod \"3d4fe53d-867d-47e4-a3df-0729580b276d\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.458161 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8429h\" (UniqueName: \"kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h\") pod \"3d4fe53d-867d-47e4-a3df-0729580b276d\" (UID: \"3d4fe53d-867d-47e4-a3df-0729580b276d\") " Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.459689 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume" (OuterVolumeSpecName: "config-volume") pod "3d4fe53d-867d-47e4-a3df-0729580b276d" (UID: "3d4fe53d-867d-47e4-a3df-0729580b276d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.464803 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h" (OuterVolumeSpecName: "kube-api-access-8429h") pod "3d4fe53d-867d-47e4-a3df-0729580b276d" (UID: "3d4fe53d-867d-47e4-a3df-0729580b276d"). InnerVolumeSpecName "kube-api-access-8429h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.465434 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3d4fe53d-867d-47e4-a3df-0729580b276d" (UID: "3d4fe53d-867d-47e4-a3df-0729580b276d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.559794 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d4fe53d-867d-47e4-a3df-0729580b276d-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.559843 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d4fe53d-867d-47e4-a3df-0729580b276d-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:03 crc kubenswrapper[5021]: I0121 16:45:03.559859 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8429h\" (UniqueName: \"kubernetes.io/projected/3d4fe53d-867d-47e4-a3df-0729580b276d-kube-api-access-8429h\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.111764 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" event={"ID":"3d4fe53d-867d-47e4-a3df-0729580b276d","Type":"ContainerDied","Data":"a61da6beb3da5751c97d39e24d6f3d8a47e2c7d065be0b827973ad57a8890d2e"} Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.111810 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483565-wdj28" Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.111813 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a61da6beb3da5751c97d39e24d6f3d8a47e2c7d065be0b827973ad57a8890d2e" Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.412504 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd"] Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.418235 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483520-w4dwd"] Jan 21 16:45:04 crc kubenswrapper[5021]: I0121 16:45:04.746520 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8975a103-c0c6-470d-a4f3-e7bee2308bb6" path="/var/lib/kubelet/pods/8975a103-c0c6-470d-a4f3-e7bee2308bb6/volumes" Jan 21 16:45:12 crc kubenswrapper[5021]: I0121 16:45:12.357375 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:45:12 crc kubenswrapper[5021]: I0121 16:45:12.357929 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.082104 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:42 crc kubenswrapper[5021]: E0121 16:45:42.083122 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4fe53d-867d-47e4-a3df-0729580b276d" containerName="collect-profiles" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.083140 5021 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3d4fe53d-867d-47e4-a3df-0729580b276d" containerName="collect-profiles" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.083342 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4fe53d-867d-47e4-a3df-0729580b276d" containerName="collect-profiles" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.084711 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.087229 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.144420 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.144503 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.144542 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jknsq\" (UniqueName: \"kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.246099 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.246197 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.246238 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jknsq\" (UniqueName: \"kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.247751 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.248021 5021 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.277160 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jknsq\" (UniqueName: \"kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq\") pod \"certified-operators-bq7px\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.357485 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.357842 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.406607 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:42 crc kubenswrapper[5021]: I0121 16:45:42.934740 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:43 crc kubenswrapper[5021]: I0121 16:45:43.379296 5021 generic.go:334] "Generic (PLEG): container finished" podID="4ca95344-c59d-496c-a380-e2e397d3e333" containerID="dc1479ab6208b4c9b6360792d5a862412290b9fafd5112cc6b7879687548fe1d" exitCode=0 Jan 21 16:45:43 crc kubenswrapper[5021]: I0121 16:45:43.379396 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerDied","Data":"dc1479ab6208b4c9b6360792d5a862412290b9fafd5112cc6b7879687548fe1d"} Jan 21 16:45:43 crc kubenswrapper[5021]: I0121 16:45:43.379695 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerStarted","Data":"75ec2c03709a8e810aa61acf284e416b8c4d02223b35a1711cec0efdfae580c4"} Jan 21 16:45:44 crc kubenswrapper[5021]: I0121 16:45:44.387623 5021 generic.go:334] "Generic (PLEG): container finished" podID="4ca95344-c59d-496c-a380-e2e397d3e333" containerID="be399562fd047e54a265f23dcf24eb6b1c2c4086291161c9f63c5c7656ad797b" exitCode=0 Jan 21 16:45:44 crc kubenswrapper[5021]: I0121 16:45:44.387697 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerDied","Data":"be399562fd047e54a265f23dcf24eb6b1c2c4086291161c9f63c5c7656ad797b"} Jan 21 16:45:45 crc kubenswrapper[5021]: I0121 16:45:45.398345 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" 
event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerStarted","Data":"47d50680be6f3601b1674972a43c0e02f769a5ee9d8f0d19fc8bbbbc8de8677f"} Jan 21 16:45:45 crc kubenswrapper[5021]: I0121 16:45:45.456688 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bq7px" podStartSLOduration=2.062047561 podStartE2EDuration="3.456668734s" podCreationTimestamp="2026-01-21 16:45:42 +0000 UTC" firstStartedPulling="2026-01-21 16:45:43.384280219 +0000 UTC m=+4884.919394098" lastFinishedPulling="2026-01-21 16:45:44.778901382 +0000 UTC m=+4886.314015271" observedRunningTime="2026-01-21 16:45:45.441649667 +0000 UTC m=+4886.976763546" watchObservedRunningTime="2026-01-21 16:45:45.456668734 +0000 UTC m=+4886.991782623" Jan 21 16:45:49 crc kubenswrapper[5021]: I0121 16:45:49.665199 5021 scope.go:117] "RemoveContainer" containerID="45068c5e8c72bdb74bb03a6b5a36960522413cee4976f58db362ba6e0b08ed85" Jan 21 16:45:52 crc kubenswrapper[5021]: I0121 16:45:52.407690 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:52 crc kubenswrapper[5021]: I0121 16:45:52.408769 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:52 crc kubenswrapper[5021]: I0121 16:45:52.459587 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:53 crc kubenswrapper[5021]: I0121 16:45:53.502996 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:53 crc kubenswrapper[5021]: I0121 16:45:53.553239 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:55 crc kubenswrapper[5021]: I0121 16:45:55.472681 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bq7px" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="registry-server" containerID="cri-o://47d50680be6f3601b1674972a43c0e02f769a5ee9d8f0d19fc8bbbbc8de8677f" gracePeriod=2 Jan 21 16:45:56 crc kubenswrapper[5021]: I0121 16:45:56.481372 5021 generic.go:334] "Generic (PLEG): container finished" podID="4ca95344-c59d-496c-a380-e2e397d3e333" containerID="47d50680be6f3601b1674972a43c0e02f769a5ee9d8f0d19fc8bbbbc8de8677f" exitCode=0 Jan 21 16:45:56 crc kubenswrapper[5021]: I0121 16:45:56.481446 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerDied","Data":"47d50680be6f3601b1674972a43c0e02f769a5ee9d8f0d19fc8bbbbc8de8677f"} Jan 21 16:45:56 crc kubenswrapper[5021]: I0121 16:45:56.953885 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.066275 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content\") pod \"4ca95344-c59d-496c-a380-e2e397d3e333\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.066389 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities\") pod \"4ca95344-c59d-496c-a380-e2e397d3e333\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.066411 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jknsq\" (UniqueName: \"kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq\") pod \"4ca95344-c59d-496c-a380-e2e397d3e333\" (UID: \"4ca95344-c59d-496c-a380-e2e397d3e333\") " Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.067509 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities" (OuterVolumeSpecName: "utilities") pod "4ca95344-c59d-496c-a380-e2e397d3e333" (UID: "4ca95344-c59d-496c-a380-e2e397d3e333"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.074581 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq" (OuterVolumeSpecName: "kube-api-access-jknsq") pod "4ca95344-c59d-496c-a380-e2e397d3e333" (UID: "4ca95344-c59d-496c-a380-e2e397d3e333"). InnerVolumeSpecName "kube-api-access-jknsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.119227 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ca95344-c59d-496c-a380-e2e397d3e333" (UID: "4ca95344-c59d-496c-a380-e2e397d3e333"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.167819 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.167890 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jknsq\" (UniqueName: \"kubernetes.io/projected/4ca95344-c59d-496c-a380-e2e397d3e333-kube-api-access-jknsq\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.167935 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ca95344-c59d-496c-a380-e2e397d3e333-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.495783 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bq7px" event={"ID":"4ca95344-c59d-496c-a380-e2e397d3e333","Type":"ContainerDied","Data":"75ec2c03709a8e810aa61acf284e416b8c4d02223b35a1711cec0efdfae580c4"} Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.496199 5021 scope.go:117] "RemoveContainer" containerID="47d50680be6f3601b1674972a43c0e02f769a5ee9d8f0d19fc8bbbbc8de8677f" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.495843 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bq7px" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.514337 5021 scope.go:117] "RemoveContainer" containerID="be399562fd047e54a265f23dcf24eb6b1c2c4086291161c9f63c5c7656ad797b" Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.537329 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.544362 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bq7px"] Jan 21 16:45:57 crc kubenswrapper[5021]: I0121 16:45:57.556573 5021 scope.go:117] "RemoveContainer" containerID="dc1479ab6208b4c9b6360792d5a862412290b9fafd5112cc6b7879687548fe1d" Jan 21 16:45:58 crc kubenswrapper[5021]: I0121 16:45:58.746847 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" path="/var/lib/kubelet/pods/4ca95344-c59d-496c-a380-e2e397d3e333/volumes" Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.357265 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.357976 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.358024 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.358628 5021 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.358679 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5" gracePeriod=600 Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.616078 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5" exitCode=0 Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.616318 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5"} Jan 21 16:46:12 crc kubenswrapper[5021]: I0121 16:46:12.616674 5021 scope.go:117] "RemoveContainer" containerID="f6b1fb9418fc722a7a20dda69b64ff74fe4c6b22f2589b779c023d74041121ec" Jan 21 16:46:13 crc kubenswrapper[5021]: I0121 16:46:13.624313 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"} Jan 21 16:48:12 crc kubenswrapper[5021]: I0121 16:48:12.357479 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:48:12 crc kubenswrapper[5021]: I0121 16:48:12.358030 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:48:42 crc kubenswrapper[5021]: I0121 16:48:42.357621 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:48:42 crc kubenswrapper[5021]: I0121 16:48:42.359405 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.356941 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.357458 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.357514 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.358182 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.358250 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" gracePeriod=600 Jan 21 16:49:12 crc kubenswrapper[5021]: E0121 16:49:12.496751 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.912228 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" exitCode=0 Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.912274 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"} Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.912321 5021 scope.go:117] "RemoveContainer" containerID="700ef1326c523225e656c6ac7ec1b4a85773ca798d07c93b076a672f21eb50a5" Jan 21 16:49:12 crc kubenswrapper[5021]: I0121 16:49:12.912923 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:49:12 crc kubenswrapper[5021]: E0121 16:49:12.913182 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" 
Jan 21 16:49:26 crc kubenswrapper[5021]: I0121 16:49:26.738555 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:49:26 crc kubenswrapper[5021]: E0121 16:49:26.739490 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:49:37 crc kubenswrapper[5021]: I0121 16:49:37.738116 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:49:37 crc kubenswrapper[5021]: E0121 16:49:37.739206 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:49:50 crc kubenswrapper[5021]: I0121 16:49:50.738692 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:49:50 crc kubenswrapper[5021]: E0121 16:49:50.739340 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:50:03 crc kubenswrapper[5021]: I0121 16:50:03.738287 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:50:03 crc kubenswrapper[5021]: E0121 16:50:03.739837 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:50:18 crc kubenswrapper[5021]: I0121 16:50:18.742026 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:50:18 crc kubenswrapper[5021]: E0121 16:50:18.744298 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 16:50:31 crc kubenswrapper[5021]: I0121 16:50:31.737235 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:50:31 
crc kubenswrapper[5021]: E0121 16:50:31.737899 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:50:46 crc kubenswrapper[5021]: I0121 16:50:46.743755 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:50:46 crc kubenswrapper[5021]: E0121 16:50:46.744350 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:50:59 crc kubenswrapper[5021]: I0121 16:50:59.737541 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:50:59 crc kubenswrapper[5021]: E0121 16:50:59.738222 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:51:12 crc kubenswrapper[5021]: I0121 16:51:12.738220 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:51:12 crc kubenswrapper[5021]: E0121 16:51:12.741230 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:51:27 crc kubenswrapper[5021]: I0121 16:51:27.738036 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:51:27 crc kubenswrapper[5021]: E0121 16:51:27.738750 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:51:38 crc kubenswrapper[5021]: I0121 16:51:38.752433 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:51:38 crc kubenswrapper[5021]: E0121 16:51:38.753699 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:51:50 crc kubenswrapper[5021]: I0121 16:51:50.738423 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:51:50 crc kubenswrapper[5021]: E0121 16:51:50.739811 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:01 crc kubenswrapper[5021]: I0121 16:52:01.737714 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:52:01 crc kubenswrapper[5021]: E0121 16:52:01.738485 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:15 crc kubenswrapper[5021]: I0121 16:52:15.737953 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:52:15 crc kubenswrapper[5021]: E0121 16:52:15.738693 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:28 crc kubenswrapper[5021]: I0121 16:52:28.754303 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:52:28 crc kubenswrapper[5021]: E0121 16:52:28.756754 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:39 crc kubenswrapper[5021]: I0121 16:52:39.737705 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:52:39 crc kubenswrapper[5021]: E0121 16:52:39.738486 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.834671 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:40 crc kubenswrapper[5021]: E0121 16:52:40.835042 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="extract-content"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.835057 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="extract-content"
Jan 21 16:52:40 crc kubenswrapper[5021]: E0121 16:52:40.835076 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="registry-server"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.835082 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="registry-server"
Jan 21 16:52:40 crc kubenswrapper[5021]: E0121 16:52:40.835098 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="extract-utilities"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.835105 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="extract-utilities"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.835257 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ca95344-c59d-496c-a380-e2e397d3e333" containerName="registry-server"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.836315 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.845375 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.951610 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.951657 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgsnl\" (UniqueName: \"kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:40 crc kubenswrapper[5021]: I0121 16:52:40.951768 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.052848 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.052995 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.053019 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgsnl\" (UniqueName: \"kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.053597 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.053658 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.075848 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgsnl\" (UniqueName: \"kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl\") pod \"redhat-operators-5dw9v\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") " pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.152083 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:41 crc kubenswrapper[5021]: I0121 16:52:41.585219 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:42 crc kubenswrapper[5021]: I0121 16:52:42.337067 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7b5efe9-b164-47f0-b666-39b23e629279" containerID="9f8a7013ae028b9baf33217cd67322977bf0249ba621f4fe9dd0d3c74a6a4de9" exitCode=0
Jan 21 16:52:42 crc kubenswrapper[5021]: I0121 16:52:42.337137 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerDied","Data":"9f8a7013ae028b9baf33217cd67322977bf0249ba621f4fe9dd0d3c74a6a4de9"}
Jan 21 16:52:42 crc kubenswrapper[5021]: I0121 16:52:42.337178 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerStarted","Data":"0ded9992deb71045e28df18431fb34bc5e67194ca8ecc4dfdf10a55a0e1d6d00"}
Jan 21 16:52:42 crc kubenswrapper[5021]: I0121 16:52:42.339685 5021 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 21 16:52:43 crc kubenswrapper[5021]: I0121 16:52:43.344556 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerStarted","Data":"0582c928c6693df1f924b988ebebc1ff52a62b976e2c03ceca96b7bacff656ce"}
Jan 21 16:52:44 crc kubenswrapper[5021]: I0121 16:52:44.352651 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7b5efe9-b164-47f0-b666-39b23e629279" containerID="0582c928c6693df1f924b988ebebc1ff52a62b976e2c03ceca96b7bacff656ce" exitCode=0
Jan 21 16:52:44 crc kubenswrapper[5021]: I0121 16:52:44.352689 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerDied","Data":"0582c928c6693df1f924b988ebebc1ff52a62b976e2c03ceca96b7bacff656ce"}
Jan 21 16:52:45 crc kubenswrapper[5021]: I0121 16:52:45.359897 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerStarted","Data":"70af62daa4243fb3dec87bfc1d946894f216f6b8831790c4aaa585d874bda359"}
Jan 21 16:52:45 crc kubenswrapper[5021]: I0121 16:52:45.385310 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5dw9v" podStartSLOduration=2.979318842 podStartE2EDuration="5.385287678s" podCreationTimestamp="2026-01-21 16:52:40 +0000 UTC" firstStartedPulling="2026-01-21 16:52:42.339498499 +0000 UTC m=+5303.874612388" lastFinishedPulling="2026-01-21 16:52:44.745467345 +0000 UTC m=+5306.280581224" observedRunningTime="2026-01-21 16:52:45.383086569 +0000 UTC m=+5306.918200478" watchObservedRunningTime="2026-01-21 16:52:45.385287678 +0000 UTC m=+5306.920401567"
Jan 21 16:52:51 crc kubenswrapper[5021]: I0121 16:52:51.152331 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:51 crc kubenswrapper[5021]: I0121 16:52:51.152892 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:51 crc kubenswrapper[5021]: I0121 16:52:51.200954 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:51 crc kubenswrapper[5021]: I0121 16:52:51.443753 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:51 crc kubenswrapper[5021]: I0121 16:52:51.495635 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:53 crc kubenswrapper[5021]: I0121 16:52:53.411350 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5dw9v" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="registry-server" containerID="cri-o://70af62daa4243fb3dec87bfc1d946894f216f6b8831790c4aaa585d874bda359" gracePeriod=2
Jan 21 16:52:54 crc kubenswrapper[5021]: I0121 16:52:54.738602 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:52:54 crc kubenswrapper[5021]: E0121 16:52:54.739445 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:52:56 crc kubenswrapper[5021]: I0121 16:52:56.432987 5021 generic.go:334] "Generic (PLEG): container finished" podID="a7b5efe9-b164-47f0-b666-39b23e629279" containerID="70af62daa4243fb3dec87bfc1d946894f216f6b8831790c4aaa585d874bda359" exitCode=0
Jan 21 16:52:56 crc kubenswrapper[5021]: I0121 16:52:56.433081 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerDied","Data":"70af62daa4243fb3dec87bfc1d946894f216f6b8831790c4aaa585d874bda359"}
Jan 21 16:52:56 crc kubenswrapper[5021]: I0121 16:52:56.927671 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.097434 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgsnl\" (UniqueName: \"kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl\") pod \"a7b5efe9-b164-47f0-b666-39b23e629279\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") "
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.097531 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities\") pod \"a7b5efe9-b164-47f0-b666-39b23e629279\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") "
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.097559 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content\") pod \"a7b5efe9-b164-47f0-b666-39b23e629279\" (UID: \"a7b5efe9-b164-47f0-b666-39b23e629279\") "
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.099695 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities" (OuterVolumeSpecName: "utilities") pod "a7b5efe9-b164-47f0-b666-39b23e629279" (UID: "a7b5efe9-b164-47f0-b666-39b23e629279"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.103671 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl" (OuterVolumeSpecName: "kube-api-access-cgsnl") pod "a7b5efe9-b164-47f0-b666-39b23e629279" (UID: "a7b5efe9-b164-47f0-b666-39b23e629279"). InnerVolumeSpecName "kube-api-access-cgsnl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.199777 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgsnl\" (UniqueName: \"kubernetes.io/projected/a7b5efe9-b164-47f0-b666-39b23e629279-kube-api-access-cgsnl\") on node \"crc\" DevicePath \"\""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.199839 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.247170 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7b5efe9-b164-47f0-b666-39b23e629279" (UID: "a7b5efe9-b164-47f0-b666-39b23e629279"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.301621 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b5efe9-b164-47f0-b666-39b23e629279-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.447652 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5dw9v" event={"ID":"a7b5efe9-b164-47f0-b666-39b23e629279","Type":"ContainerDied","Data":"0ded9992deb71045e28df18431fb34bc5e67194ca8ecc4dfdf10a55a0e1d6d00"}
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.447747 5021 scope.go:117] "RemoveContainer" containerID="70af62daa4243fb3dec87bfc1d946894f216f6b8831790c4aaa585d874bda359"
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.448060 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5dw9v"
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.490372 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.497327 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5dw9v"]
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.502606 5021 scope.go:117] "RemoveContainer" containerID="0582c928c6693df1f924b988ebebc1ff52a62b976e2c03ceca96b7bacff656ce"
Jan 21 16:52:57 crc kubenswrapper[5021]: I0121 16:52:57.550843 5021 scope.go:117] "RemoveContainer" containerID="9f8a7013ae028b9baf33217cd67322977bf0249ba621f4fe9dd0d3c74a6a4de9"
Jan 21 16:52:58 crc kubenswrapper[5021]: I0121 16:52:58.746519 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" path="/var/lib/kubelet/pods/a7b5efe9-b164-47f0-b666-39b23e629279/volumes"
Jan 21 16:53:05 crc kubenswrapper[5021]: I0121 16:53:05.738422 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:53:05 crc kubenswrapper[5021]: E0121 16:53:05.739153 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:53:18 crc kubenswrapper[5021]: I0121 16:53:18.741604 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:53:18 crc kubenswrapper[5021]: E0121 16:53:18.742296 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:53:33 crc kubenswrapper[5021]: I0121 16:53:33.737965 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:53:33 crc kubenswrapper[5021]: E0121 16:53:33.739112 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:53:45 crc kubenswrapper[5021]: I0121 16:53:45.737555 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:53:45 crc kubenswrapper[5021]: E0121 16:53:45.738350 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:54:00 crc kubenswrapper[5021]: I0121 16:54:00.738683 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:54:00 crc kubenswrapper[5021]: E0121 16:54:00.740012 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
Jan 21 16:54:12 crc kubenswrapper[5021]: I0121 16:54:12.738258 5021 scope.go:117] "RemoveContainer" containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6"
Jan 21 16:54:12 crc kubenswrapper[5021]: I0121 16:54:12.973256 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f"}
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.086360 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:16 crc kubenswrapper[5021]: E0121 16:54:16.087416 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="extract-content"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.087435 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="extract-content"
Jan 21 16:54:16 crc kubenswrapper[5021]: E0121 16:54:16.087464 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="registry-server"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.087473 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="registry-server"
Jan 21 16:54:16 crc kubenswrapper[5021]: E0121 16:54:16.087495 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="extract-utilities"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.087504 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="extract-utilities"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.087684 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7b5efe9-b164-47f0-b666-39b23e629279" containerName="registry-server"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.089114 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.095587 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.120932 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skjx5\" (UniqueName: \"kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.121311 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.121453 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.222896 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.223319 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.223677 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skjx5\" (UniqueName: \"kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.224176 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.224350 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.251452 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skjx5\" (UniqueName: \"kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5\") pod \"redhat-marketplace-jkxv8\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") " pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.414364 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:16 crc kubenswrapper[5021]: I0121 16:54:16.905453 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:16 crc kubenswrapper[5021]: W0121 16:54:16.918791 5021 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59c027bc_f7e0_41bc_80e1_edb8a7b1f14b.slice/crio-5ae37504b351fe580281ef5099774103ba2c7b145d3f5a048812457de92ebf88 WatchSource:0}: Error finding container 5ae37504b351fe580281ef5099774103ba2c7b145d3f5a048812457de92ebf88: Status 404 returned error can't find the container with id 5ae37504b351fe580281ef5099774103ba2c7b145d3f5a048812457de92ebf88
Jan 21 16:54:17 crc kubenswrapper[5021]: I0121 16:54:17.002030 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerStarted","Data":"5ae37504b351fe580281ef5099774103ba2c7b145d3f5a048812457de92ebf88"}
Jan 21 16:54:18 crc kubenswrapper[5021]: I0121 16:54:18.010300 5021 generic.go:334] "Generic (PLEG): container finished" podID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerID="ab2270b163e656d79ca7b77c92a46ee5e6f371061e0ddc1d64365bcbd8d90f4e" exitCode=0
Jan 21 16:54:18 crc kubenswrapper[5021]: I0121 16:54:18.010583 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerDied","Data":"ab2270b163e656d79ca7b77c92a46ee5e6f371061e0ddc1d64365bcbd8d90f4e"}
Jan 21 16:54:19 crc kubenswrapper[5021]: I0121 16:54:19.017649 5021 generic.go:334] "Generic (PLEG): container finished" podID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerID="4317377fe4db2058129ec6ce37c3bfd1e320ff4d399f687d289e28f76a6c6630" exitCode=0
Jan 21 16:54:19 crc kubenswrapper[5021]: I0121 16:54:19.017729 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerDied","Data":"4317377fe4db2058129ec6ce37c3bfd1e320ff4d399f687d289e28f76a6c6630"}
Jan 21 16:54:20 crc kubenswrapper[5021]: I0121 16:54:20.026749 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerStarted","Data":"ecae60a69741066101ca39ee87603ee1a1b3c1ad3f5f28b4749a3dfe29d14e70"}
Jan 21 16:54:20 crc kubenswrapper[5021]: I0121 16:54:20.046339 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jkxv8" podStartSLOduration=2.618805182 podStartE2EDuration="4.046321711s" podCreationTimestamp="2026-01-21 16:54:16 +0000 UTC" firstStartedPulling="2026-01-21 16:54:18.013725155 +0000 UTC m=+5399.548839044" lastFinishedPulling="2026-01-21 16:54:19.441241684 +0000 UTC m=+5400.976355573" observedRunningTime="2026-01-21 16:54:20.045359655 +0000 UTC m=+5401.580473564" watchObservedRunningTime="2026-01-21 16:54:20.046321711 +0000 UTC m=+5401.581435590"
Jan 21 16:54:26 crc kubenswrapper[5021]: I0121 16:54:26.417693 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:26 crc kubenswrapper[5021]: I0121 16:54:26.417987 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:26 crc kubenswrapper[5021]: I0121 16:54:26.460706 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:27 crc kubenswrapper[5021]: I0121 16:54:27.113240 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:27 crc kubenswrapper[5021]: I0121 16:54:27.154711 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:29 crc kubenswrapper[5021]: I0121 16:54:29.081943 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jkxv8" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="registry-server" containerID="cri-o://ecae60a69741066101ca39ee87603ee1a1b3c1ad3f5f28b4749a3dfe29d14e70" gracePeriod=2
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.092819 5021 generic.go:334] "Generic (PLEG): container finished" podID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerID="ecae60a69741066101ca39ee87603ee1a1b3c1ad3f5f28b4749a3dfe29d14e70" exitCode=0
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.092891 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerDied","Data":"ecae60a69741066101ca39ee87603ee1a1b3c1ad3f5f28b4749a3dfe29d14e70"}
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.653184 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.792976 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities\") pod \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") "
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.793101 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skjx5\" (UniqueName: \"kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5\") pod \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") "
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.793132 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content\") pod \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\" (UID: \"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b\") "
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.800255 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities" (OuterVolumeSpecName: "utilities") pod "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" (UID: "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.809596 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5" (OuterVolumeSpecName: "kube-api-access-skjx5") pod "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" (UID: "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b"). InnerVolumeSpecName "kube-api-access-skjx5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.827853 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" (UID: "59c027bc-f7e0-41bc-80e1-edb8a7b1f14b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.894820 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.894881 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:54:30 crc kubenswrapper[5021]: I0121 16:54:30.894898 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skjx5\" (UniqueName: \"kubernetes.io/projected/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b-kube-api-access-skjx5\") on node \"crc\" DevicePath \"\""
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.100282 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jkxv8" event={"ID":"59c027bc-f7e0-41bc-80e1-edb8a7b1f14b","Type":"ContainerDied","Data":"5ae37504b351fe580281ef5099774103ba2c7b145d3f5a048812457de92ebf88"}
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.100347 5021 scope.go:117] "RemoveContainer" containerID="ecae60a69741066101ca39ee87603ee1a1b3c1ad3f5f28b4749a3dfe29d14e70"
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.101208 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jkxv8"
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.133496 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.133845 5021 scope.go:117] "RemoveContainer" containerID="4317377fe4db2058129ec6ce37c3bfd1e320ff4d399f687d289e28f76a6c6630"
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.139199 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jkxv8"]
Jan 21 16:54:31 crc kubenswrapper[5021]: I0121 16:54:31.150492 5021 scope.go:117] "RemoveContainer" containerID="ab2270b163e656d79ca7b77c92a46ee5e6f371061e0ddc1d64365bcbd8d90f4e"
Jan 21 16:54:32 crc kubenswrapper[5021]: I0121 16:54:32.748529 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" path="/var/lib/kubelet/pods/59c027bc-f7e0-41bc-80e1-edb8a7b1f14b/volumes"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.691893 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:55:54 crc kubenswrapper[5021]: E0121 16:55:54.693525 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="extract-utilities"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.693573 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="extract-utilities"
Jan 21 16:55:54 crc kubenswrapper[5021]: E0121 16:55:54.693623 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="registry-server"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.693633 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="registry-server"
Jan 21 16:55:54 crc kubenswrapper[5021]: E0121 16:55:54.693653 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="extract-content"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.693665 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="extract-content"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.693965 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="59c027bc-f7e0-41bc-80e1-edb8a7b1f14b" containerName="registry-server"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.695732 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.702047 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.789194 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cthw\" (UniqueName: \"kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.789309 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.789451 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.891335 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cthw\" (UniqueName: \"kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.891407 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.891443 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.892400 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.892506 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:54 crc kubenswrapper[5021]: I0121 16:55:54.916198 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cthw\" (UniqueName: \"kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw\") pod \"certified-operators-dpbwg\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") " pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:55 crc kubenswrapper[5021]: I0121 16:55:55.027123 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:55:55 crc kubenswrapper[5021]: I0121 16:55:55.552471 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:55:55 crc kubenswrapper[5021]: I0121 16:55:55.691699 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerStarted","Data":"2cae22072867d1d972454333880f68992e39cb4749d991dd184c66bc38404a0c"}
Jan 21 16:55:56 crc kubenswrapper[5021]: I0121 16:55:56.698611 5021 generic.go:334] "Generic (PLEG): container finished" podID="3da001e5-1773-4f2d-a084-5e185737af1e" containerID="13813aec235355172cbdc2528b94efb9195e38504645de4d5d19ceb28628a0de" exitCode=0
Jan 21 16:55:56 crc kubenswrapper[5021]: I0121 16:55:56.698652 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerDied","Data":"13813aec235355172cbdc2528b94efb9195e38504645de4d5d19ceb28628a0de"}
Jan 21 16:55:58 crc kubenswrapper[5021]: I0121 16:55:58.712693 5021 generic.go:334] "Generic (PLEG): container finished" podID="3da001e5-1773-4f2d-a084-5e185737af1e" containerID="3c8dabe09976f798e6eb2fd16580f2d875e04ba5e0d490cdcc5dd7a4cce52686" exitCode=0
Jan 21 16:55:58 crc kubenswrapper[5021]: I0121 16:55:58.712770 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerDied","Data":"3c8dabe09976f798e6eb2fd16580f2d875e04ba5e0d490cdcc5dd7a4cce52686"}
Jan 21 16:55:59 crc kubenswrapper[5021]: I0121 16:55:59.721432 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerStarted","Data":"89da5020e7db6d44861a02c28161bf5d7d5a738e6ca770139d55340ed4a9043e"}
Jan 21 16:55:59 crc kubenswrapper[5021]: I0121 16:55:59.747095 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dpbwg" podStartSLOduration=3.257104573 podStartE2EDuration="5.7470762s" podCreationTimestamp="2026-01-21 16:55:54 +0000 UTC" firstStartedPulling="2026-01-21 16:55:56.700078946 +0000 UTC m=+5498.235192825" lastFinishedPulling="2026-01-21 16:55:59.190050563 +0000 UTC m=+5500.725164452" observedRunningTime="2026-01-21 16:55:59.7393989 +0000 UTC m=+5501.274512799" watchObservedRunningTime="2026-01-21 16:55:59.7470762 +0000 UTC m=+5501.282190079"
Jan 21 16:56:05 crc kubenswrapper[5021]: I0121 16:56:05.027778 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:05 crc kubenswrapper[5021]: I0121 16:56:05.028397 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:05 crc kubenswrapper[5021]: I0121 16:56:05.074896 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:05 crc kubenswrapper[5021]: I0121 16:56:05.797521 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:05 crc kubenswrapper[5021]: I0121 16:56:05.858231 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:56:07 crc kubenswrapper[5021]: I0121 16:56:07.773516 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dpbwg" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="registry-server" containerID="cri-o://89da5020e7db6d44861a02c28161bf5d7d5a738e6ca770139d55340ed4a9043e" gracePeriod=2
Jan 21 16:56:08 crc kubenswrapper[5021]: I0121 16:56:08.782577 5021 generic.go:334] "Generic (PLEG): container finished" podID="3da001e5-1773-4f2d-a084-5e185737af1e" containerID="89da5020e7db6d44861a02c28161bf5d7d5a738e6ca770139d55340ed4a9043e" exitCode=0
Jan 21 16:56:08 crc kubenswrapper[5021]: I0121 16:56:08.782624 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerDied","Data":"89da5020e7db6d44861a02c28161bf5d7d5a738e6ca770139d55340ed4a9043e"}
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.010375 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.122970 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cthw\" (UniqueName: \"kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw\") pod \"3da001e5-1773-4f2d-a084-5e185737af1e\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") "
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.123031 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content\") pod \"3da001e5-1773-4f2d-a084-5e185737af1e\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") "
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.123055 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities\") pod \"3da001e5-1773-4f2d-a084-5e185737af1e\" (UID: \"3da001e5-1773-4f2d-a084-5e185737af1e\") "
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.124070 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities" (OuterVolumeSpecName: "utilities") pod "3da001e5-1773-4f2d-a084-5e185737af1e" (UID: "3da001e5-1773-4f2d-a084-5e185737af1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.131124 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw" (OuterVolumeSpecName: "kube-api-access-2cthw") pod "3da001e5-1773-4f2d-a084-5e185737af1e" (UID: "3da001e5-1773-4f2d-a084-5e185737af1e"). InnerVolumeSpecName "kube-api-access-2cthw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.174701 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3da001e5-1773-4f2d-a084-5e185737af1e" (UID: "3da001e5-1773-4f2d-a084-5e185737af1e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.224505 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cthw\" (UniqueName: \"kubernetes.io/projected/3da001e5-1773-4f2d-a084-5e185737af1e-kube-api-access-2cthw\") on node \"crc\" DevicePath \"\""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.224561 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.224576 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3da001e5-1773-4f2d-a084-5e185737af1e-utilities\") on node \"crc\" DevicePath \"\""
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.790382 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpbwg" event={"ID":"3da001e5-1773-4f2d-a084-5e185737af1e","Type":"ContainerDied","Data":"2cae22072867d1d972454333880f68992e39cb4749d991dd184c66bc38404a0c"}
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.790456 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpbwg"
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.790724 5021 scope.go:117] "RemoveContainer" containerID="89da5020e7db6d44861a02c28161bf5d7d5a738e6ca770139d55340ed4a9043e"
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.832526 5021 scope.go:117] "RemoveContainer" containerID="3c8dabe09976f798e6eb2fd16580f2d875e04ba5e0d490cdcc5dd7a4cce52686"
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.832870 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.840477 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dpbwg"]
Jan 21 16:56:09 crc kubenswrapper[5021]: I0121 16:56:09.854089 5021 scope.go:117] "RemoveContainer" containerID="13813aec235355172cbdc2528b94efb9195e38504645de4d5d19ceb28628a0de"
Jan 21 16:56:10 crc kubenswrapper[5021]: I0121 16:56:10.747379 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" path="/var/lib/kubelet/pods/3da001e5-1773-4f2d-a084-5e185737af1e/volumes"
Jan 21 16:56:12 crc kubenswrapper[5021]: I0121 16:56:12.357487 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 16:56:12 crc kubenswrapper[5021]: I0121 16:56:12.357855 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.419605 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xsv4n"]
Jan 21 16:56:29 crc kubenswrapper[5021]: E0121 16:56:29.420509 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="extract-content"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.420523 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="extract-content"
Jan 21 16:56:29 crc kubenswrapper[5021]: E0121 16:56:29.420543 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="extract-utilities"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.420550 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="extract-utilities"
Jan 21 16:56:29 crc kubenswrapper[5021]: E0121 16:56:29.420569 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="registry-server"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.420576 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="registry-server"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.420718 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da001e5-1773-4f2d-a084-5e185737af1e" containerName="registry-server"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.421720 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.431304 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xsv4n"]
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.542300 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.542625 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vxhm\" (UniqueName: \"kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.542788 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.661876 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.661951 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.662058 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vxhm\" (UniqueName: \"kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.662457 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.663522 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.682965 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vxhm\" (UniqueName: \"kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm\") pod \"community-operators-xsv4n\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:29 crc kubenswrapper[5021]: I0121 16:56:29.753989 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:30 crc kubenswrapper[5021]: I0121 16:56:30.263080 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xsv4n"]
Jan 21 16:56:30 crc kubenswrapper[5021]: I0121 16:56:30.956385 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerID="47e50d0e321ab652435b414ed3e453b867db161360f13c7903d6cb174a61f2e6" exitCode=0
Jan 21 16:56:30 crc kubenswrapper[5021]: I0121 16:56:30.956429 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerDied","Data":"47e50d0e321ab652435b414ed3e453b867db161360f13c7903d6cb174a61f2e6"}
Jan 21 16:56:30 crc kubenswrapper[5021]: I0121 16:56:30.956459 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerStarted","Data":"4ac1eab05a7b2c79d9129849983cb40ab04969442693ba3e7ad5d2d185dddd26"}
Jan 21 16:56:31 crc kubenswrapper[5021]: I0121 16:56:31.965221 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerID="2ad9ee951330ab1d37d557e0a561481b2daed0e0916e81e617f3c9816b61a6fb" exitCode=0
Jan 21 16:56:31 crc kubenswrapper[5021]: I0121 16:56:31.965311 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerDied","Data":"2ad9ee951330ab1d37d557e0a561481b2daed0e0916e81e617f3c9816b61a6fb"}
Jan 21 16:56:32 crc kubenswrapper[5021]: I0121 16:56:32.974549 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerStarted","Data":"940be4333ed6e9b42bbb50ef75e3610c15e78ae5eb49eaf133a9aa6c9766d353"}
Jan 21 16:56:32 crc kubenswrapper[5021]: I0121 16:56:32.998571 5021 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xsv4n" podStartSLOduration=2.539452022 podStartE2EDuration="3.998552173s" podCreationTimestamp="2026-01-21 16:56:29 +0000 UTC" firstStartedPulling="2026-01-21 16:56:30.959083718 +0000 UTC m=+5532.494197607" lastFinishedPulling="2026-01-21 16:56:32.418183869 +0000 UTC m=+5533.953297758" observedRunningTime="2026-01-21 16:56:32.992293312 +0000 UTC m=+5534.527407201" watchObservedRunningTime="2026-01-21 16:56:32.998552173 +0000 UTC m=+5534.533666052"
Jan 21 16:56:39 crc kubenswrapper[5021]: I0121 16:56:39.754706 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:39 crc kubenswrapper[5021]: I0121 16:56:39.755368 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:39 crc kubenswrapper[5021]: I0121 16:56:39.798569 5021 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:40 crc kubenswrapper[5021]: I0121 16:56:40.064209 5021 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xsv4n"
Jan 21 16:56:40 crc kubenswrapper[5021]: I0121 16:56:40.113710 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xsv4n"]
Jan 21 16:56:42 crc kubenswrapper[5021]: I0121 16:56:42.037815 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xsv4n" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="registry-server" containerID="cri-o://940be4333ed6e9b42bbb50ef75e3610c15e78ae5eb49eaf133a9aa6c9766d353" gracePeriod=2
Jan 21 16:56:42 crc kubenswrapper[5021]: I0121 16:56:42.363366 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 21 16:56:42 crc kubenswrapper[5021]: I0121 16:56:42.363437 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.072177 5021 generic.go:334] "Generic (PLEG): container finished" podID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerID="940be4333ed6e9b42bbb50ef75e3610c15e78ae5eb49eaf133a9aa6c9766d353" exitCode=0
Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.072281 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerDied","Data":"940be4333ed6e9b42bbb50ef75e3610c15e78ae5eb49eaf133a9aa6c9766d353"}
Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.072535 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsv4n" event={"ID":"4e5e10ed-bc9f-467c-a79c-cd3c7923383f","Type":"ContainerDied","Data":"4ac1eab05a7b2c79d9129849983cb40ab04969442693ba3e7ad5d2d185dddd26"}
Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.072573 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ac1eab05a7b2c79d9129849983cb40ab04969442693ba3e7ad5d2d185dddd26"
Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.096359 5021 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-xsv4n" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.188856 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content\") pod \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.189057 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities\") pod \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.189107 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vxhm\" (UniqueName: \"kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm\") pod \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\" (UID: \"4e5e10ed-bc9f-467c-a79c-cd3c7923383f\") " Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.189996 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities" (OuterVolumeSpecName: "utilities") pod "4e5e10ed-bc9f-467c-a79c-cd3c7923383f" (UID: "4e5e10ed-bc9f-467c-a79c-cd3c7923383f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.195898 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm" (OuterVolumeSpecName: "kube-api-access-2vxhm") pod "4e5e10ed-bc9f-467c-a79c-cd3c7923383f" (UID: "4e5e10ed-bc9f-467c-a79c-cd3c7923383f"). InnerVolumeSpecName "kube-api-access-2vxhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.240864 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e5e10ed-bc9f-467c-a79c-cd3c7923383f" (UID: "4e5e10ed-bc9f-467c-a79c-cd3c7923383f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.290245 5021 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-utilities\") on node \"crc\" DevicePath \"\"" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.290277 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vxhm\" (UniqueName: \"kubernetes.io/projected/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-kube-api-access-2vxhm\") on node \"crc\" DevicePath \"\"" Jan 21 16:56:45 crc kubenswrapper[5021]: I0121 16:56:45.290286 5021 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e5e10ed-bc9f-467c-a79c-cd3c7923383f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 21 16:56:46 crc kubenswrapper[5021]: I0121 16:56:46.078669 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xsv4n" Jan 21 16:56:46 crc kubenswrapper[5021]: I0121 16:56:46.112448 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xsv4n"] Jan 21 16:56:46 crc kubenswrapper[5021]: I0121 16:56:46.119553 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xsv4n"] Jan 21 16:56:46 crc kubenswrapper[5021]: I0121 16:56:46.746339 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" path="/var/lib/kubelet/pods/4e5e10ed-bc9f-467c-a79c-cd3c7923383f/volumes" Jan 21 16:57:12 crc kubenswrapper[5021]: I0121 16:57:12.356955 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:57:12 crc kubenswrapper[5021]: I0121 16:57:12.357498 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:57:12 crc kubenswrapper[5021]: I0121 16:57:12.357571 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 16:57:12 crc kubenswrapper[5021]: I0121 16:57:12.358266 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f"} pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 16:57:12 crc kubenswrapper[5021]: I0121 16:57:12.358322 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f" gracePeriod=600 Jan 21 16:57:13 crc kubenswrapper[5021]: I0121 16:57:13.272800 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f" exitCode=0 Jan 21 16:57:13 crc kubenswrapper[5021]: I0121 16:57:13.272847 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f"} Jan 21 16:57:13 crc kubenswrapper[5021]: I0121 16:57:13.273380 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerStarted","Data":"c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4"} Jan 21 16:57:13 crc kubenswrapper[5021]: I0121 16:57:13.273406 5021 scope.go:117] "RemoveContainer" 
containerID="e0e191f6bc7c7e42fed1b61f5dd62a12896bd25fa4e273919d10adf051c917c6" Jan 21 16:59:12 crc kubenswrapper[5021]: I0121 16:59:12.357549 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:59:12 crc kubenswrapper[5021]: I0121 16:59:12.358097 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 16:59:42 crc kubenswrapper[5021]: I0121 16:59:42.357056 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 16:59:42 crc kubenswrapper[5021]: I0121 16:59:42.357662 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.148164 5021 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj"] Jan 21 17:00:00 crc kubenswrapper[5021]: E0121 17:00:00.149059 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="registry-server" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.149076 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="registry-server" Jan 21 17:00:00 crc kubenswrapper[5021]: E0121 17:00:00.149101 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="extract-content" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.149109 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="extract-content" Jan 21 17:00:00 crc kubenswrapper[5021]: E0121 17:00:00.149122 5021 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="extract-utilities" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.149131 5021 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="extract-utilities" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.149294 5021 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e5e10ed-bc9f-467c-a79c-cd3c7923383f" containerName="registry-server" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.149831 5021 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.151731 5021 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.152194 5021 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.159949 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj"] Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.264966 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.265243 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh8c7\" (UniqueName: \"kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.265419 5021 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.367394 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.367527 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.367583 5021 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh8c7\" (UniqueName: \"kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.369823 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume\") pod 
\"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.373315 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.385255 5021 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh8c7\" (UniqueName: \"kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7\") pod \"collect-profiles-29483580-7vkzj\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.473449 5021 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:00 crc kubenswrapper[5021]: I0121 17:00:00.919586 5021 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj"] Jan 21 17:00:01 crc kubenswrapper[5021]: I0121 17:00:01.463734 5021 generic.go:334] "Generic (PLEG): container finished" podID="3809e416-7493-4c08-9e2a-a3f609d3346b" containerID="b76e58c03939af171c42d4f7c94db59dc58af2a487c50dc10baaed821d60cc81" exitCode=0 Jan 21 17:00:01 crc kubenswrapper[5021]: I0121 17:00:01.464136 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" event={"ID":"3809e416-7493-4c08-9e2a-a3f609d3346b","Type":"ContainerDied","Data":"b76e58c03939af171c42d4f7c94db59dc58af2a487c50dc10baaed821d60cc81"} Jan 21 17:00:01 crc kubenswrapper[5021]: I0121 17:00:01.464201 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" event={"ID":"3809e416-7493-4c08-9e2a-a3f609d3346b","Type":"ContainerStarted","Data":"5a9f77d9b8fb58dae4590cc3cc79303bd551c312e64a65cfe181639c9706e011"} Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.720479 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.811523 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume\") pod \"3809e416-7493-4c08-9e2a-a3f609d3346b\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.811596 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume\") pod \"3809e416-7493-4c08-9e2a-a3f609d3346b\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.811685 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sh8c7\" (UniqueName: \"kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7\") pod \"3809e416-7493-4c08-9e2a-a3f609d3346b\" (UID: \"3809e416-7493-4c08-9e2a-a3f609d3346b\") " Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.812606 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume" (OuterVolumeSpecName: "config-volume") pod "3809e416-7493-4c08-9e2a-a3f609d3346b" (UID: "3809e416-7493-4c08-9e2a-a3f609d3346b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.822235 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3809e416-7493-4c08-9e2a-a3f609d3346b" (UID: "3809e416-7493-4c08-9e2a-a3f609d3346b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.833837 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7" (OuterVolumeSpecName: "kube-api-access-sh8c7") pod "3809e416-7493-4c08-9e2a-a3f609d3346b" (UID: "3809e416-7493-4c08-9e2a-a3f609d3346b"). InnerVolumeSpecName "kube-api-access-sh8c7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.913548 5021 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3809e416-7493-4c08-9e2a-a3f609d3346b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.913597 5021 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3809e416-7493-4c08-9e2a-a3f609d3346b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 21 17:00:02 crc kubenswrapper[5021]: I0121 17:00:02.913611 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sh8c7\" (UniqueName: \"kubernetes.io/projected/3809e416-7493-4c08-9e2a-a3f609d3346b-kube-api-access-sh8c7\") on node \"crc\" DevicePath \"\"" Jan 21 17:00:03 crc kubenswrapper[5021]: I0121 17:00:03.479164 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" event={"ID":"3809e416-7493-4c08-9e2a-a3f609d3346b","Type":"ContainerDied","Data":"5a9f77d9b8fb58dae4590cc3cc79303bd551c312e64a65cfe181639c9706e011"} Jan 21 17:00:03 crc kubenswrapper[5021]: I0121 17:00:03.479513 5021 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a9f77d9b8fb58dae4590cc3cc79303bd551c312e64a65cfe181639c9706e011" Jan 21 17:00:03 crc kubenswrapper[5021]: I0121 17:00:03.479665 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29483580-7vkzj" Jan 21 17:00:03 crc kubenswrapper[5021]: I0121 17:00:03.793778 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb"] Jan 21 17:00:03 crc kubenswrapper[5021]: I0121 17:00:03.800335 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29483535-7gctb"] Jan 21 17:00:04 crc kubenswrapper[5021]: I0121 17:00:04.747321 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0427e77d-89b1-4caf-9b8d-8c5285bab4eb" path="/var/lib/kubelet/pods/0427e77d-89b1-4caf-9b8d-8c5285bab4eb/volumes" Jan 21 17:00:12 crc kubenswrapper[5021]: I0121 17:00:12.356745 5021 patch_prober.go:28] interesting pod/machine-config-daemon-n22xz container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 21 17:00:12 crc kubenswrapper[5021]: I0121 17:00:12.357295 5021 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 21 17:00:12 crc kubenswrapper[5021]: I0121 17:00:12.357339 5021 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" Jan 21 17:00:12 crc kubenswrapper[5021]: I0121 17:00:12.357816 5021 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4"} 
pod="openshift-machine-config-operator/machine-config-daemon-n22xz" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 21 17:00:12 crc kubenswrapper[5021]: I0121 17:00:12.358691 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerName="machine-config-daemon" containerID="cri-o://c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" gracePeriod=600 Jan 21 17:00:13 crc kubenswrapper[5021]: I0121 17:00:13.554947 5021 generic.go:334] "Generic (PLEG): container finished" podID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" exitCode=0 Jan 21 17:00:13 crc kubenswrapper[5021]: I0121 17:00:13.555009 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" event={"ID":"d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1","Type":"ContainerDied","Data":"c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4"} Jan 21 17:00:13 crc kubenswrapper[5021]: I0121 17:00:13.555280 5021 scope.go:117] "RemoveContainer" containerID="19cd86e38752260a4dc9391c3046044ceae9db6deaa6db15fbd804a8964fbf1f" Jan 21 17:00:13 crc kubenswrapper[5021]: E0121 17:00:13.995147 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:00:14 crc kubenswrapper[5021]: I0121 17:00:14.563044 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:00:14 crc kubenswrapper[5021]: E0121 17:00:14.563296 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:00:28 crc kubenswrapper[5021]: I0121 17:00:28.749051 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:00:28 crc kubenswrapper[5021]: E0121 17:00:28.752089 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:00:39 crc kubenswrapper[5021]: I0121 17:00:39.737744 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:00:39 crc kubenswrapper[5021]: E0121 17:00:39.738554 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:00:49 crc kubenswrapper[5021]: I0121 17:00:49.936081 5021 scope.go:117] "RemoveContainer" containerID="6f195b716f9dfc8afaad5524ba92df98a80349401941691ec84e12b8bbd7e037" Jan 21 17:00:52 crc kubenswrapper[5021]: I0121 17:00:52.737659 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:00:52 crc kubenswrapper[5021]: E0121 17:00:52.738297 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:03 crc kubenswrapper[5021]: I0121 17:01:03.737958 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:01:03 crc kubenswrapper[5021]: E0121 17:01:03.739000 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:16 crc kubenswrapper[5021]: I0121 17:01:16.737742 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:01:16 crc kubenswrapper[5021]: E0121 17:01:16.738533 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:30 crc kubenswrapper[5021]: I0121 17:01:30.737816 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:01:30 crc kubenswrapper[5021]: E0121 17:01:30.738746 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:41 crc kubenswrapper[5021]: I0121 17:01:41.738662 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:01:41 crc kubenswrapper[5021]: E0121 17:01:41.739299 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:52 crc kubenswrapper[5021]: I0121 17:01:52.738118 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:01:52 crc kubenswrapper[5021]: E0121 17:01:52.738825 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:01:54 crc kubenswrapper[5021]: I0121 17:01:54.331835 5021 generic.go:334] "Generic (PLEG): container finished" podID="95f8dbfd-2482-4331-ad0f-292ffec962e8" containerID="435d87bf05a4a87e645c9c0c9adcb2a4c2ecb020bf32e96e0edf39c1edff5826" exitCode=0 Jan 21 17:01:54 crc kubenswrapper[5021]: I0121 17:01:54.332002 5021 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kxfkv/must-gather-spnwq" event={"ID":"95f8dbfd-2482-4331-ad0f-292ffec962e8","Type":"ContainerDied","Data":"435d87bf05a4a87e645c9c0c9adcb2a4c2ecb020bf32e96e0edf39c1edff5826"} Jan 21 17:01:54 crc kubenswrapper[5021]: I0121 17:01:54.332669 5021 scope.go:117] "RemoveContainer" containerID="435d87bf05a4a87e645c9c0c9adcb2a4c2ecb020bf32e96e0edf39c1edff5826" Jan 21 17:01:55 crc kubenswrapper[5021]: I0121 17:01:55.307654 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kxfkv_must-gather-spnwq_95f8dbfd-2482-4331-ad0f-292ffec962e8/gather/0.log" Jan 21 17:02:03 crc kubenswrapper[5021]: I0121 17:02:03.139972 5021 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-kxfkv/must-gather-spnwq"] Jan 21 17:02:03 crc kubenswrapper[5021]: I0121 17:02:03.140848 5021 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-kxfkv/must-gather-spnwq" podUID="95f8dbfd-2482-4331-ad0f-292ffec962e8" containerName="copy" containerID="cri-o://bc44bdeaf4006764235cdf749abd11cad00b2606d84854fcf6c01fd7af7e9683" gracePeriod=2 Jan 21 17:02:03 crc kubenswrapper[5021]: I0121 17:02:03.147466 5021 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-kxfkv/must-gather-spnwq"] Jan 21 17:02:03 crc kubenswrapper[5021]: I0121 17:02:03.393819 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kxfkv_must-gather-spnwq_95f8dbfd-2482-4331-ad0f-292ffec962e8/copy/0.log" Jan 21 17:02:03 crc kubenswrapper[5021]: I0121 17:02:03.394371 5021 generic.go:334] "Generic (PLEG): container finished" podID="95f8dbfd-2482-4331-ad0f-292ffec962e8" containerID="bc44bdeaf4006764235cdf749abd11cad00b2606d84854fcf6c01fd7af7e9683" exitCode=143 Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.187815 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kxfkv_must-gather-spnwq_95f8dbfd-2482-4331-ad0f-292ffec962e8/copy/0.log" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.188208 5021 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kxfkv/must-gather-spnwq" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.311731 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8bn8\" (UniqueName: \"kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8\") pod \"95f8dbfd-2482-4331-ad0f-292ffec962e8\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.311785 5021 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output\") pod \"95f8dbfd-2482-4331-ad0f-292ffec962e8\" (UID: \"95f8dbfd-2482-4331-ad0f-292ffec962e8\") " Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.333465 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8" (OuterVolumeSpecName: "kube-api-access-k8bn8") pod "95f8dbfd-2482-4331-ad0f-292ffec962e8" (UID: "95f8dbfd-2482-4331-ad0f-292ffec962e8"). InnerVolumeSpecName "kube-api-access-k8bn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.404699 5021 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kxfkv_must-gather-spnwq_95f8dbfd-2482-4331-ad0f-292ffec962e8/copy/0.log" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.405522 5021 scope.go:117] "RemoveContainer" containerID="bc44bdeaf4006764235cdf749abd11cad00b2606d84854fcf6c01fd7af7e9683" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.405762 5021 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kxfkv/must-gather-spnwq" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.414501 5021 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8bn8\" (UniqueName: \"kubernetes.io/projected/95f8dbfd-2482-4331-ad0f-292ffec962e8-kube-api-access-k8bn8\") on node \"crc\" DevicePath \"\"" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.432225 5021 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "95f8dbfd-2482-4331-ad0f-292ffec962e8" (UID: "95f8dbfd-2482-4331-ad0f-292ffec962e8"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.443834 5021 scope.go:117] "RemoveContainer" containerID="435d87bf05a4a87e645c9c0c9adcb2a4c2ecb020bf32e96e0edf39c1edff5826" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.516132 5021 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95f8dbfd-2482-4331-ad0f-292ffec962e8-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 21 17:02:04 crc kubenswrapper[5021]: I0121 17:02:04.745276 5021 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95f8dbfd-2482-4331-ad0f-292ffec962e8" path="/var/lib/kubelet/pods/95f8dbfd-2482-4331-ad0f-292ffec962e8/volumes" Jan 21 17:02:05 crc kubenswrapper[5021]: I0121 17:02:05.737843 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:02:05 crc kubenswrapper[5021]: E0121 17:02:05.738070 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:02:19 crc kubenswrapper[5021]: I0121 17:02:19.737692 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:02:19 crc kubenswrapper[5021]: E0121 17:02:19.738343 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:02:33 crc kubenswrapper[5021]: I0121 17:02:33.737857 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:02:33 crc kubenswrapper[5021]: E0121 17:02:33.739021 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:02:45 crc kubenswrapper[5021]: I0121 17:02:45.737876 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:02:45 crc kubenswrapper[5021]: E0121 17:02:45.738839 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:02:49 crc kubenswrapper[5021]: I0121 17:02:49.998682 5021 scope.go:117] "RemoveContainer" 
containerID="47e50d0e321ab652435b414ed3e453b867db161360f13c7903d6cb174a61f2e6" Jan 21 17:02:50 crc kubenswrapper[5021]: I0121 17:02:50.030609 5021 scope.go:117] "RemoveContainer" containerID="940be4333ed6e9b42bbb50ef75e3610c15e78ae5eb49eaf133a9aa6c9766d353" Jan 21 17:02:50 crc kubenswrapper[5021]: I0121 17:02:50.046626 5021 scope.go:117] "RemoveContainer" containerID="2ad9ee951330ab1d37d557e0a561481b2daed0e0916e81e617f3c9816b61a6fb" Jan 21 17:02:57 crc kubenswrapper[5021]: I0121 17:02:57.738397 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:02:57 crc kubenswrapper[5021]: E0121 17:02:57.740202 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:03:08 crc kubenswrapper[5021]: I0121 17:03:08.742358 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:03:08 crc kubenswrapper[5021]: E0121 17:03:08.743206 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:03:19 crc kubenswrapper[5021]: I0121 17:03:19.737592 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:03:19 crc kubenswrapper[5021]: E0121 17:03:19.738406 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:03:33 crc kubenswrapper[5021]: I0121 17:03:33.737946 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:03:33 crc kubenswrapper[5021]: E0121 17:03:33.739189 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1" Jan 21 17:03:45 crc kubenswrapper[5021]: I0121 17:03:45.738807 5021 scope.go:117] "RemoveContainer" containerID="c44b6b71bc63928326f9d442ea67fea899c05f9c5bf784990e39ce949f1c57f4" Jan 21 17:03:45 crc kubenswrapper[5021]: E0121 17:03:45.739901 5021 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-n22xz_openshift-machine-config-operator(d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1)\"" pod="openshift-machine-config-operator/machine-config-daemon-n22xz" podUID="d40ced49-d7c3-4d2a-ba74-85d60a4cf3b1"
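The entries from 16:56:29 to 16:56:45 trace one complete volume lifecycle for community-operators-xsv4n: VerifyControllerAttachedVolume and MountVolume/SetUp on pod creation, then UnmountVolume/TearDown and "Volume detached" after the API DELETE. A minimal sketch of that desired-state versus actual-state reconciliation pattern follows; it is an illustrative reduction, not the kubelet's code, and the names desired, mounted, and reconcile are hypothetical.

```go
// Illustrative reduction of the reconciler_common.go pattern seen above:
// mount what is desired but not mounted, unmount what is mounted but no
// longer desired. Names and data structures are hypothetical.
package main

import "fmt"

func reconcile(desired, mounted map[string]bool) {
	for vol := range mounted {
		if !desired[vol] {
			fmt.Printf("UnmountVolume started for volume %q\n", vol)
			delete(mounted, vol) // TearDown succeeded -> "Volume detached"
		}
	}
	for vol := range desired {
		if !mounted[vol] {
			fmt.Printf("MountVolume started for volume %q\n", vol)
			mounted[vol] = true // MountVolume.SetUp succeeded
		}
	}
}

func main() {
	desired := map[string]bool{"catalog-content": true, "utilities": true, "kube-api-access-2vxhm": true}
	mounted := map[string]bool{}
	reconcile(desired, mounted)           // pod admitted: all three volumes mount
	reconcile(map[string]bool{}, mounted) // pod deleted: all three unmount
}
```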
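The pod_startup_latency_tracker entry at 16:56:32.998 reports both podStartSLOduration=2.539452022s and podStartE2EDuration=3.998552173s. A small sketch of the arithmetic, assuming the SLO figure is the end-to-end duration minus the image-pull window bounded by firstStartedPulling and lastFinishedPulling; the timestamps below are copied from that log line, and the program reproduces both reported values.

```go
// Reproduces the two durations in the 16:56:32.998 entry from its own
// timestamps. Assumption: SLO duration = E2E duration - image-pull window.
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-21 16:56:29 +0000 UTC")               // podCreationTimestamp
	firstPull := parse("2026-01-21 16:56:30.959083718 +0000 UTC")   // firstStartedPulling
	lastPull := parse("2026-01-21 16:56:32.418183869 +0000 UTC")    // lastFinishedPulling
	running := parse("2026-01-21 16:56:32.998552173 +0000 UTC")     // watchObservedRunningTime

	e2e := running.Sub(created)          // podStartE2EDuration: 3.998552173s
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: 2.539452022s
	fmt.Println(e2e, slo)
}
```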
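The machine-config-daemon liveness failures recur on a roughly 30-second cadence (16:56:42, 16:57:12, and so on), each an HTTP GET against 127.0.0.1:8798/health that is refused. A probe shaped like that could be declared as below; only the host, port, and path come from the log output, while the period and failure threshold are assumptions for illustration.

```go
// A liveness probe consistent with the failures logged above. Field values
// other than host/port/path are assumed, not taken from the pod spec.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    30, // assumed; matches the ~30s cadence of the logged failures
		FailureThreshold: 3,  // assumed; once exceeded, the container is killed and restarted
	}
	fmt.Printf("%+v\n", probe)
}
```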
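Once the liveness-triggered restarts accumulate, every resync from 17:00:13.995 onward is rejected with CrashLoopBackOff quoting "back-off 5m0s", which is the cap on the kubelet's restart backoff rather than the current delay. A sketch of the doubling-with-cap schedule follows, assuming the upstream defaults of a 10-second initial delay doubled per restart up to the 5-minute cap; jitter and the reset after a period of stable running are omitted.

```go
// Doubling-with-cap restart backoff behind the repeated CrashLoopBackOff
// messages. Parameters assumed from upstream kubelet defaults (10s initial,
// x2 per restart, 5m cap, the "5m0s" the error message quotes).
package main

import (
	"fmt"
	"time"
)

func main() {
	delay, maxDelay := 10*time.Second, 5*time.Minute
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: next attempt after %v\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s
		}
	}
}
```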